S: Supported
F: drivers/scsi/bfa/
+BROCADE BNA 10 GIGABIT ETHERNET DRIVER
+M: Rasesh Mody <rmody@brocade.com>
+M: Debashis Dutt <ddutt@brocade.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/bna/
+
BSG (block layer generic sg v4 driver)
M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
L: linux-scsi@vger.kernel.org
To compile this driver as a module, choose M here: the module
will be called qlge.
+config BNA
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE-capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
+
+ For general information and support, go to the Brocade support
+ website at:
+
+ <http://support.brocade.com>
+
source "drivers/net/sfc/Kconfig"
source "drivers/net/benet/Kconfig"
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/
+obj-$(CONFIG_BNA) += bna/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
--- /dev/null
+#
+# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_BNA) += bna.o
+
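+# bnad*: Linux netdev/ethtool glue; bfa_*/cna_*: IOC, CEE and firmware image
+# handling (closely mirrors the corresponding code in drivers/scsi/bfa)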
+bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
+
+EXTRA_CFLAGS := -Idrivers/net/bna
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_defs_cna.h"
+#include "cna.h"
+#include "bfa_cee.h"
+#include "bfi_cna.h"
+#include "bfa_ioc.h"
+
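+/* A CEE instance derives its port id (and LPU id) from the IOC it is attached to */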
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
+
+static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
+static void bfa_cee_format_cee_cfg(void *buffer);
+
+static void
+bfa_cee_format_cee_cfg(void *buffer)
+{
+ struct bfa_cee_attr *cee_cfg = buffer;
+ bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
+}
+
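+/* Counters arrive from the f/w in network byte order; swap each u32 in place */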
+static void
+bfa_cee_stats_swap(struct bfa_cee_stats *stats)
+{
+ u32 *buffer = (u32 *)stats;
+ int i;
+
+ for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32)); i++)
+ buffer[i] = ntohl(buffer[i]);
+}
+
+static void
+bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
+{
+ lldp_cfg->time_to_live =
+ ntohs(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ ntohs(lldp_cfg->enabled_system_cap);
+}
+
+/**
+ * bfa_cee_attr_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE attributes
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_attr_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_stats_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE stats
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_stats_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_attr_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr));
+ bfa_cee_format_cee_cfg(cee->attr);
+ }
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn)
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+}
+
+/**
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_stats_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats));
+ bfa_cee_stats_swap(cee->stats);
+ }
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn)
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+ return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
+}
+
+/**
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initialize CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+{
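+ /*
+ * The caller provides a single DMA region: the attribute buffer
+ * occupies the first (aligned) chunk and the stats buffer the rest.
+ */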
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
+ cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
+ cee->attr = (struct bfa_cee_attr *) dma_kva;
+ cee->stats = (struct bfa_cee_stats *)
+ (dma_kva + bfa_cee_attr_meminfo());
+}
+
+/**
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+enum bfa_status
+bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(cee == NULL || cee->ioc == NULL);
+ if (!bfa_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+ if (cee->get_attr_pending == true)
+ return BFA_STATUS_DEVBUSY;
+ cee->get_attr_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+enum bfa_status
+bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(cee == NULL || cee->ioc == NULL);
+
+ if (!bfa_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+ if (cee->get_stats_pending == true)
+ return BFA_STATUS_DEVBUSY;
+ cee->get_stats_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg;
+ cee->stats = stats;
+ cee->cbfn.get_stats_cbfn = cbfn;
+ cee->cbfn.get_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+enum bfa_status
+bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
+ void *cbarg)
+{
+ struct bfi_cee_reset_stats *cmd;
+
+ BUG_ON(cee == NULL || cee->ioc == NULL);
+ if (!bfa_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+ if (cee->reset_stats_pending == true)
+ return BFA_STATUS_DEVBUSY;
+ cee->reset_stats_pending = true;
+ cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg;
+ cee->cbfn.reset_stats_cbfn = cbfn;
+ cee->cbfn.reset_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+ bfa_ioc_portid(cee->ioc));
+ bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_isr()
+ *
+ * @brief Handles mailbox interrupts for the CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp *get_rsp;
+ struct bfa_cee *cee = (struct bfa_cee *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp *) m;
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * bfa_cee_hbfail()
+ *
+ * @brief CEE module heart-beat failure handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_hbfail(void *arg)
+{
+ struct bfa_cee *cee;
+ cee = (struct bfa_cee *) arg;
+
+ if (cee->get_attr_pending == true) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == true) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == true) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+}
+
+/**
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+ void *dev)
+{
+ BUG_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
+ bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+}
+
+/**
+ * bfa_cee_detach()
+ *
+ * @brief CEE module-detach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ *
+ * @return void
+ */
+void
+bfa_cee_detach(struct bfa_cee *cee)
+{
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_CEE_H__
+#define __BFA_CEE_H__
+
+#include "bfa_defs_cna.h"
+#include "bfa_ioc.h"
+
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
+
+struct bfa_cee_cbfn {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee {
+ void *dev;
+ bool get_attr_pending;
+ bool get_stats_pending;
+ bool reset_stats_pending;
+ enum bfa_status get_attr_status;
+ enum bfa_status get_stats_status;
+ enum bfa_status reset_stats_status;
+ struct bfa_cee_cbfn cbfn;
+ struct bfa_ioc_hbfail_notify hbfail;
+ struct bfa_cee_attr *attr;
+ struct bfa_cee_stats *stats;
+ struct bfa_dma attr_dma;
+ struct bfa_dma stats_dma;
+ struct bfa_ioc *ioc;
+ struct bfa_mbox_cmd get_cfg_mb;
+ struct bfa_mbox_cmd get_stats_mb;
+ struct bfa_mbox_cmd reset_stats_mb;
+};
+
+u32 bfa_cee_meminfo(void);
+void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+ u64 dma_pa);
+void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+void bfa_cee_detach(struct bfa_cee *cee);
+enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee,
+ struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee,
+ struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn,
+ void *cbarg);
+enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee,
+ bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
+#endif /* __BFA_CEE_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "cna.h"
+#include "bfa_defs_status.h"
+#include "bfa_defs_mfg_comm.h"
+
+#define BFA_STRING_32 32
+#define BFA_VERSION_LEN 64
+
+/**
+ * ---------------------- adapter definitions ------------
+ */
+
+/**
+ * BFA adapter level attributes.
+ */
+enum {
+ BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+ /*!< adapter serial num length */
+ BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
+ BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
+ BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
+ BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
+ BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
+};
+
+struct bfa_adapter_attr {
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 card_type;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ u64 pwwn;
+ char node_symname[FC_SYMNAME_MAX];
+ char hw_ver[BFA_VERSION_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+ char os_type[BFA_ADAPTER_OS_TYPE_LEN];
+ struct bfa_mfg_vpd vpd;
+ struct mac mac;
+
+ u8 nports;
+ u8 max_speed;
+ u8 prototype;
+ char asic_rev;
+
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 cna_capable;
+
+ u8 is_mezz;
+ u8 trunk_capable;
+};
+
+/**
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+ BFA_IOC_DRIVER_LEN = 16,
+ BFA_IOC_CHIP_REV_LEN = 8,
+};
+
+/**
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr {
+ char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
+ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
+ char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
+ char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
+ char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
+ char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
+};
+
+/**
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr {
+ u16 vendor_id; /*!< PCI vendor ID */
+ u16 device_id; /*!< PCI device ID */
+ u16 ssid; /*!< subsystem ID */
+ u16 ssvid; /*!< subsystem vendor ID */
+ u32 pcifn; /*!< PCI device function */
+ u32 rsvd; /* padding */
+ char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
+};
+
+/**
+ * IOC states
+ */
+enum bfa_ioc_state {
+ BFA_IOC_RESET = 1, /*!< IOC is in reset state */
+ BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
+ BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
+ BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
+ BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
+ BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
+ BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
+ BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats {
+ u32 enable_reqs;
+ u32 disable_reqs;
+ u32 get_attr_reqs;
+ u32 dbg_sync;
+ u32 dbg_dump;
+ u32 unknown_reqs;
+};
+
+/**
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats {
+ u32 ioc_isrs;
+ u32 ioc_enables;
+ u32 ioc_disables;
+ u32 ioc_hbfails;
+ u32 ioc_boots;
+ u32 stats_tmos;
+ u32 hb_count;
+ u32 disable_reqs;
+ u32 enable_reqs;
+ u32 disable_replies;
+ u32 enable_replies;
+};
+
+/**
+ * IOC statistics
+ */
+struct bfa_ioc_stats {
+ struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
+ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
+};
+
+enum bfa_ioc_type {
+ BFA_IOC_TYPE_FC = 1,
+ BFA_IOC_TYPE_FCoE = 2,
+ BFA_IOC_TYPE_LL = 3,
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr {
+ enum bfa_ioc_type ioc_type;
+ enum bfa_ioc_state state; /*!< IOC state */
+ struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
+ struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
+ struct bfa_ioc_pci_attr pci_attr;
+ u8 port_id; /*!< port number */
+ u8 rsvd[7]; /*!< 64bit align */
+};
+
+/**
+ * ---------------------- mfg definitions ------------
+ */
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+#define BFA_MFG_PARTNUM_SIZE 14
+#define BFA_MFG_SUPPLIER_ID_SIZE 10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+
+#pragma pack(1)
+
+/**
+ * @brief BFA adapter manufacturing block definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block {
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ u64 mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char
+ supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char
+ supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 mfg_type; /*!< card type */
+ u8 rsv3[108];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/**
+ * ---------------------- pci definitions ------------
+ */
+
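+/* Matches both PCI device IDs (Ethernet/FCoE and FC) used by the CT ASIC */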
+#define bfa_asic_id_ct(devid) \
+ ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
+ (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+
+#endif /* __BFA_DEFS_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_CNA_H__
+#define __BFA_DEFS_CNA_H__
+
+#include "bfa_defs.h"
+
+/**
+ * @brief
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats {
+ u64 secs_reset; /*!< Seconds since stats were reset */
+ u64 tx_frames; /*!< Tx frames */
+ u64 tx_words; /*!< Tx words */
+ u64 tx_lip; /*!< Tx LIP */
+ u64 tx_nos; /*!< Tx NOS */
+ u64 tx_ols; /*!< Tx OLS */
+ u64 tx_lr; /*!< Tx LR */
+ u64 tx_lrr; /*!< Tx LRR */
+ u64 rx_frames; /*!< Rx frames */
+ u64 rx_words; /*!< Rx words */
+ u64 lip_count; /*!< Rx LIP */
+ u64 nos_count; /*!< Rx NOS */
+ u64 ols_count; /*!< Rx OLS */
+ u64 lr_count; /*!< Rx LR */
+ u64 lrr_count; /*!< Rx LRR */
+ u64 invalid_crcs; /*!< Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
+ u64 undersized_frm; /*!< Rx undersized frames */
+ u64 oversized_frm; /*!< Rx oversized frames */
+ u64 bad_eof_frm; /*!< Rx frames with bad EOF */
+ u64 error_frames; /*!< Errored frames */
+ u64 dropped_frames; /*!< Dropped frames */
+ u64 link_failures; /*!< Link Failure (LF) count */
+ u64 loss_of_syncs; /*!< Loss of sync count */
+ u64 loss_of_signals; /*!< Loss of signal count */
+ u64 primseq_errs; /*!< Primitive sequence protocol err. */
+ u64 bad_os_count; /*!< Invalid ordered sets */
+ u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
+ u64 err_enc; /*!< Encoding err frame_8b10b */
+};
+
+/**
+ * @brief
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats {
+ u64 secs_reset; /*!< Seconds since stats were reset */
+ u64 frame_64; /*!< Frames 64 bytes */
+ u64 frame_65_127; /*!< Frames 65-127 bytes */
+ u64 frame_128_255; /*!< Frames 128-255 bytes */
+ u64 frame_256_511; /*!< Frames 256-511 bytes */
+ u64 frame_512_1023; /*!< Frames 512-1023 bytes */
+ u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
+ u64 tx_bytes; /*!< Tx bytes */
+ u64 tx_packets; /*!< Tx packets */
+ u64 tx_mcast_packets; /*!< Tx multicast packets */
+ u64 tx_bcast_packets; /*!< Tx broadcast packets */
+ u64 tx_control_frame; /*!< Tx control frame */
+ u64 tx_drop; /*!< Tx drops */
+ u64 tx_jabber; /*!< Tx jabber */
+ u64 tx_fcs_error; /*!< Tx FCS errors */
+ u64 tx_fragments; /*!< Tx fragments */
+ u64 rx_bytes; /*!< Rx bytes */
+ u64 rx_packets; /*!< Rx packets */
+ u64 rx_mcast_packets; /*!< Rx multicast packets */
+ u64 rx_bcast_packets; /*!< Rx broadcast packets */
+ u64 rx_control_frames; /*!< Rx control frames */
+ u64 rx_unknown_opcode; /*!< Rx unknown opcode */
+ u64 rx_drop; /*!< Rx drops */
+ u64 rx_jabber; /*!< Rx jabber */
+ u64 rx_fcs_error; /*!< Rx FCS errors */
+ u64 rx_alignment_error; /*!< Rx alignment errors */
+ u64 rx_frame_length_error; /*!< Rx frame len errors */
+ u64 rx_code_error; /*!< Rx code errors */
+ u64 rx_fragments; /*!< Rx fragments */
+ u64 rx_pause; /*!< Rx pause */
+ u64 rx_zero_pause; /*!< Rx zero pause */
+ u64 tx_pause; /*!< Tx pause */
+ u64 tx_zero_pause; /*!< Tx zero pause */
+ u64 rx_fcoe_pause; /*!< Rx FCoE pause */
+ u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /*!< Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
+};
+
+/**
+ * @brief
+ * Port statistics.
+ */
+union bfa_port_stats_u {
+ struct bfa_port_fc_stats fc;
+ struct bfa_port_eth_stats eth;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
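+/* LLDP system capability bits, per the IEEE 802.1AB capability bitmap */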
+#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
+#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
+#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
+#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
+#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
+#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
+#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
+#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
+#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
+#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
+#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
+
+/* LLDP string type */
+struct bfa_cee_lldp_str {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+/* LLDP parameters */
+struct bfa_cee_lldp_cfg {
+ struct bfa_cee_lldp_str chassis_id;
+ struct bfa_cee_lldp_str port_id;
+ struct bfa_cee_lldp_str port_desc;
+ struct bfa_cee_lldp_str sys_name;
+ struct bfa_cee_lldp_str sys_desc;
+ struct bfa_cee_lldp_str mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+enum bfa_cee_dcbx_version {
+ DCBX_PROTOCOL_PRECEE = 1,
+ DCBX_PROTOCOL_CEE = 2,
+};
+
+enum bfa_cee_lls {
+ /* LLS is down because the peer did not send the TLV */
+ CEE_LLS_DOWN_NO_TLV = 0,
+ /* LLS is down as advertised by the peer */
+ CEE_LLS_DOWN = 1,
+ CEE_LLS_UP = 2,
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+ u8 pfc_primap; /* bitmap of priorities with PFC enabled */
+ u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+ u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+ u8 dcbx_version; /* operating version: CEE or pre-CEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE status */
+/* Kept tri-state for the benefit of the port list command */
+enum bfa_cee_status {
+ CEE_UP = 0,
+ CEE_PHY_UP = 1,
+ CEE_LOOPBACK = 2,
+ CEE_PHY_DOWN = 3,
+};
+
+/* CEE Query */
+struct bfa_cee_attr {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg lldp_remote;
+ struct bfa_cee_dcbx_cfg dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats {
+ u32 lldp_tx_frames; /*!< LLDP Tx Frames */
+ u32 lldp_rx_frames; /*!< LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
+ u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
+ u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
+ u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /*!< CEE status down */
+ u32 cee_status_up; /*!< CEE status up */
+ u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
+ u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_CNA_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_MFG_COMM_H__
+#define __BFA_DEFS_MFG_COMM_H__
+
+#include "cna.h"
+
+/**
+ * Manufacturing block version
+ */
+#define BFA_MFG_VERSION 2
+#define BFA_MFG_VERSION_UNINIT 0xFF
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+#define BFA_MFG_SERIALNUM_SIZE 11
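+/* Storage for an _n character string plus NUL, rounded up to a 4-byte multiple */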
+#define STRSZ(_n) (((_n) + 4) & ~3)
+
+/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
+ BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if 1-port card
+ */
+#define bfa_mfg_is_1port(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P1))
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING))
+
+/**
+ * Check if card type valid
+ */
+#define bfa_mfg_is_card_type_valid(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ bfa_mfg_is_mezz(type)))
+
+/**
+ * Check if the card uses the old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE))
+
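+/* Treats m[0..2] as a 24-bit big-endian value and adds i; carry past 24 bits is dropped */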
+#define bfa_mfg_increment_wwn_mac(m, i) \
+do { \
+ u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
+ t += (i); \
+ (m)[0] = (t >> 16) & 0xFF; \
+ (m)[1] = (t >> 8) & 0xFF; \
+ (m)[2] = t & 0xFF; \
+} while (0)
+
+#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
+do { \
+ switch ((card_type)) { \
+ case BFA_MFG_TYPE_FC8P2: \
+ case BFA_MFG_TYPE_JAYHAWK: \
+ case BFA_MFG_TYPE_ASTRA: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC8P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC4P2: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_FC4P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P2: \
+ case BFA_MFG_TYPE_WANCHESE: \
+ case BFA_MFG_TYPE_LIGHTNING_P0: \
+ case BFA_MFG_TYPE_LIGHTNING: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ default: \
+ (prop) = BFI_ADAPTER_UNSUPP; \
+ } \
+} while (0)
+
+enum {
+ CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
+ CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
+ CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
+ CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
+ CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
+ CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
+ CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+};
+
+#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
+do { \
+ if ((gpio) & CB_GPIO_PROTO) { \
+ (prop) |= BFI_ADAPTER_PROTO; \
+ (gpio) &= ~CB_GPIO_PROTO; \
+ } \
+ switch ((gpio)) { \
+ case CB_GPIO_TTV: \
+ (prop) |= BFI_ADAPTER_TTV; \
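+ /* fall through - a TTV card is also an 8G 2port FC card */ \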
+ case CB_GPIO_DFLY: \
+ case CB_GPIO_FC8P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P2; \
+ break; \
+ case CB_GPIO_FC8P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P1; \
+ break; \
+ case CB_GPIO_FC4P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P2; \
+ break; \
+ case CB_GPIO_FC4P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P1; \
+ break; \
+ default: \
+ (prop) |= BFI_ADAPTER_UNSUPP; \
+ (card_type) = BFA_MFG_TYPE_INVALID; \
+ } \
+} while (0)
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+#define BFA_MFG_VPD_LEN_INVALID 0
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+};
+
+/**
+ * @brief BFA adapter flash vpd data definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd {
+ u8 version; /*!< vpd data version */
+ u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
+ u8 chksum; /*!< u8 checksum */
+ u8 vendor; /*!< vendor */
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_MFG_COMM_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_STATUS_H__
+#define __BFA_DEFS_STATUS_H__
+
+/**
+ * API status return values
+ *
+ * NOTE: The error msgs are auto-generated from the comments. Only single-line
+ * comments are supported.
+ */
+enum bfa_status {
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_UNKNOWN_VFID = 11,
+ BFA_STATUS_DATACORRUPTED = 12,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_FLASH_BAD_LEN = 17,
+ BFA_STATUS_UNKNOWN_LWWN = 18,
+ BFA_STATUS_UNKNOWN_RWWN = 19,
+ BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_VPORT_EXISTS = 21,
+ BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_UNSUPP_SPEED = 23,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_UNKNOWN_VWWN = 30,
+ BFA_STATUS_NSLOGIN_FAILED = 31,
+ BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NSQUERY_FAILED = 33,
+ BFA_STATUS_PORT_OFFLINE = 34,
+ BFA_STATUS_RPORT_OFFLINE = 35,
+ BFA_STATUS_TGTOPEN_FAILED = 36,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_VPORT_WWN_BP = 46,
+ BFA_STATUS_PORT_NOT_DISABLED = 47,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_NO_FCPIM_NEXUS = 52,
+ BFA_STATUS_CHECKSUM_FAIL = 53,
+ BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_SCSISTART_REQD = 55,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_ADAPTER_ENABLED = 60,
+ BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_ADDR_MAP_FAILURE = 62,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_AD_IS_ENABLE = 67,
+ BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_QDEPTH = 69,
+ BFA_STATUS_VERSION_FAIL = 70,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_PORT_NOT_INITED = 76,
+ BFA_STATUS_RPSC_ENABLED = 77,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_NO_SFP_DEV = 89,
+ BFA_STATUS_MEMTEST_FAILED = 90,
+ BFA_STATUS_INVALID_DEVID = 91,
+ BFA_STATUS_QOS_ENABLED = 92,
+ BFA_STATUS_QOS_DISABLED = 93,
+ BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
+ BFA_STATUS_REG_FAIL = 95,
+ BFA_STATUS_IM_INV_CODE = 96,
+ BFA_STATUS_IM_INV_VLAN = 97,
+ BFA_STATUS_IM_INV_ADAPT_NAME = 98,
+ BFA_STATUS_IM_LOW_RESOURCES = 99,
+ BFA_STATUS_IM_VLANID_IS_PVID = 100,
+ BFA_STATUS_IM_VLANID_EXISTS = 101,
+ BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
+ BFA_STATUS_PORTLOG_ENABLED = 103,
+ BFA_STATUS_PORTLOG_DISABLED = 104,
+ BFA_STATUS_FILE_NOT_FOUND = 105,
+ BFA_STATUS_QOS_FC_ONLY = 106,
+ BFA_STATUS_RLIM_FC_ONLY = 107,
+ BFA_STATUS_CT_SPD = 108,
+ BFA_STATUS_LEDTEST_OP = 109,
+ BFA_STATUS_CEE_NOT_DN = 110,
+ BFA_STATUS_10G_SPD = 111,
+ BFA_STATUS_IM_INV_TEAM_NAME = 112,
+ BFA_STATUS_IM_DUP_TEAM_NAME = 113,
+ BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
+ BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
+ BFA_STATUS_IM_PVID_MISMATCH = 116,
+ BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
+ BFA_STATUS_IM_MTU_MISMATCH = 118,
+ BFA_STATUS_IM_RSS_MISMATCH = 119,
+ BFA_STATUS_IM_HDS_MISMATCH = 120,
+ BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
+ BFA_STATUS_IM_PORT_PARAMS = 122,
+ BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
+ BFA_STATUS_IM_CANNOT_REM_PRI = 124,
+ BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
+ BFA_STATUS_IM_LAST_PORT_DELETE = 126,
+ BFA_STATUS_IM_NO_DRIVER = 127,
+ BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
+ BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
+ BFA_STATUS_NO_MINPORT_DRIVER = 130,
+ BFA_STATUS_CARD_TYPE_MISMATCH = 131,
+ BFA_STATUS_BAD_ASICBLK = 132,
+ BFA_STATUS_NO_DRIVER = 133,
+ BFA_STATUS_INVALID_MAC = 134,
+ BFA_STATUS_IM_NO_VLAN = 135,
+ BFA_STATUS_IM_ETH_LB_FAILED = 136,
+ BFA_STATUS_IM_PVID_REMOVE = 137,
+ BFA_STATUS_IM_PVID_EDIT = 138,
+ BFA_STATUS_CNA_NO_BOOT = 139,
+ BFA_STATUS_IM_PVID_NON_ZERO = 140,
+ BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
+ BFA_STATUS_IM_NOT_BOUND = 143,
+ BFA_STATUS_INSUFFICIENT_PERMS = 144,
+ BFA_STATUS_IM_INV_VLAN_NAME = 145,
+ BFA_STATUS_CMD_NOTSUPP_CNA = 146,
+ BFA_STATUS_IM_PASSTHRU_EDIT = 147,
+ BFA_STATUS_IM_BIND_FAILED = 148,
+ BFA_STATUS_IM_UNBIND_FAILED = 149,
+ BFA_STATUS_IM_PORT_IN_TEAM = 150,
+ BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
+ BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
+ BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
+ BFA_STATUS_PBC = 154,
+ BFA_STATUS_DEVID_MISSING = 155,
+ BFA_STATUS_BAD_FWCFG = 156,
+ BFA_STATUS_CREATE_FILE = 157,
+ BFA_STATUS_INVALID_VENDOR = 158,
+ BFA_STATUS_SFP_NOT_READY = 159,
+ BFA_STATUS_FLASH_UNINIT = 160,
+ BFA_STATUS_FLASH_EMPTY = 161,
+ BFA_STATUS_FLASH_CKFAIL = 162,
+ BFA_STATUS_TRUNK_UNSUPP = 163,
+ BFA_STATUS_TRUNK_ENABLED = 164,
+ BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
+ BFA_STATUS_BOOT_CODE_UPDATED = 167,
+ BFA_STATUS_BOOT_VERSION = 168,
+ BFA_STATUS_CARDTYPE_MISSING = 169,
+ BFA_STATUS_INVALID_CARDTYPE = 170,
+ BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
+ BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_IOPROFILE_OFF = 175,
+ BFA_STATUS_NO_PORT_INSTANCE = 176,
+ BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
+ BFA_STATUS_NO_VPORT_LOCK = 178,
+ BFA_STATUS_VPORT_NO_CNFG = 179,
+ BFA_STATUS_MAX_VAL
+};
+
+enum bfa_eproto_status {
+ BFA_EPROTO_BAD_ACCEPT = 0,
+ BFA_EPROTO_UNKNOWN_RSP = 1
+};
+
+#endif /* __BFA_DEFS_STATUS_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/**
+ * IOC local definitions
+ */
+
+#define bfa_ioc_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV))
+#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
+
+#define bfa_ioc_recovery_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
+
+#define bfa_sem_timer_start(__ioc) \
+ mod_timer(&(__ioc)->sem_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
+#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
+
+#define bfa_hb_timer_start(__ioc) \
+ mod_timer(&(__ioc)->hb_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HB_TOV))
+#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
+
+/**
+ * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_hbfail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+
+#define bfa_ioc_is_optrom(__ioc) \
+ (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+
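+/*
+ * A command is pending if the driver queue is non-empty or the last
+ * command still occupies the h/w mailbox register.
+ */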
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
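+/* Module-wide default; latched into each IOC on entry to the reset state */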
+bool bfa_auto_recover = true;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_recover(struct bfa_ioc *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+
+/**
+ * IOC state machine events
+ */
+enum ioc_event {
+ IOC_E_ENABLE = 1, /*!< IOC enable request */
+ IOC_E_DISABLE = 2, /*!< IOC disable request */
+ IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
+ IOC_E_FWREADY = 4, /*!< f/w initialization done */
+ IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
+ IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
+ IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
+ IOC_E_HBFAIL = 8, /*!< heartbeat failure */
+ IOC_E_HWERROR = 9, /*!< hardware error interrupt */
+ IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
+ IOC_E_DETACH = 11, /*!< driver detach cleanup */
+};
+
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+
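+/* Maps each FSM state handler to the externally reported bfa_ioc_state */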
+static struct bfa_sm_table ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
+ {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+};
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+{
+ ioc->retry_count = 0;
+ ioc->auto_recover = bfa_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ } else {
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
+ }
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Notify enable completion callback and generate mismatch AEN.
+ */
+static void
+bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification only once.
+ */
+ if (ioc->retry_count == 0)
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ ioc->retry_count++;
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, false);
+}
+
+/**
+ * @brief
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWREADY:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, true);
+ break;
+ }
+
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_ENABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ writel(BFI_IOC_UNINIT,
+ ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+ }
+
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
+}
+
+/**
+ * @brief
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_HWERROR:
+ case IOC_E_FWREADY:
+ /**
+ * Hard error or IOC recovery by other function.
+ * Treat it same as heartbeat failure.
+ */
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+
+ case IOC_E_HBFAIL:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_disable(ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOC_E_TIMEOUT:
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * @brief
+ * Hardware initialization failed.
+ */
+static void
+bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ /**
+ * Mark IOC as failed in hardware and stop firmware.
+ */
+ bfa_ioc_lpu_stop(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+
+ /**
+ * Notify other functions on HB failure.
+ */
+ bfa_ioc_notify_hbfail(ioc);
+
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+
+ /**
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_hbfail(ioc);
+
+ /**
+ * Trigger auto-recovery after a delay.
+ */
+ if (ioc->auto_recover)
+ mod_timer(&ioc->ioc_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
+}
+
+/**
+ * @brief
+ * IOC heartbeat failure.
+ */
+static void
+bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ if (ioc->auto_recover)
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_FWREADY:
+ /**
+ * Recovery is already initiated by other function.
+ */
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * HB failure notification, ignore.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * BFA IOC private functions
+ */
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+
+ /**
+ * Notify common modules registered for notification.
+ */
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+}
+
+void
+bfa_ioc_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+bool
+bfa_ioc_sem_get(void __iomem *sem_reg)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
+
+ r32 = readl(sem_reg);
+
+ while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+ cnt++;
+ udelay(2);
+ r32 = readl(sem_reg);
+ }
+
+ if (r32 == 0)
+ return true;
+
+	BUG_ON(cnt >= BFA_SEM_SPINCNT);
+ return false;
+}
+
+void
+bfa_ioc_sem_release(void __iomem *sem_reg)
+{
+ writel(1, sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ /**
+	 * The first read of the semaphore register returns 0; subsequent
+	 * reads return 1. The semaphore is released by writing 1 to the
+	 * register.
+ */
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == 0) {
+ bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+ return;
+ }
+
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+}
+
+void
+bfa_ioc_hw_sem_release(struct bfa_ioc *ioc)
+{
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->sem_timer);
+}
+
+/**
+ * @brief
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+
+ /*
+	 * I2C workaround: 12.5KHz clock
+ */
+ pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+ /**
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /**
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Take processor out of reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Put processors in reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/**
+ * Get driver and firmware versions.
+ */
+void
+bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
+ i++) {
+ fwsig[i] =
+ swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ loff += sizeof(u32);
+ }
+}
+
+/**
+ * Returns true if the driver's and the running firmware's md5 checksums
+ * match.
+ */
+bool
+bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ int i;
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bool
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+
+ /**
+ * If bios/efi boot (flash based) -- return true
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ return true;
+
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ if (fwhdr.signature != drv_fwhdr->signature)
+ return false;
+
+ if (fwhdr.exec != drv_fwhdr->exec)
+ return false;
+
+ return bfa_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+/**
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+/**
+ * @img ioc_init_logic.jpg
+ */
+static void
+bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bool fwvalid;
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ /**
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ false : bfa_ioc_fwver_valid(ioc);
+
+ if (!fwvalid) {
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ return;
+ }
+
+	/**
+	 * If hardware initialization is in progress (being performed by the
+	 * other IOC), just wait for the initialization completion interrupt.
+	 */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ return;
+ }
+
+ /**
+ * If IOC function is disabled and firmware version is same,
+ * just re-enable IOC.
+ *
+ * If option rom, IOC must not be in operational state. With
+ * convergence, IOC will be in operational state when 2nd driver
+ * is loaded.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED ||
+ (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+ /**
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ return;
+ }
+
+ /**
+ * Initialize the h/w for any other states.
+ */
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+}
+
+void
+bfa_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+void
+bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+	BUG_ON(len > BFI_IOC_MSGLEN_MAX);
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req enable_req;
+ struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.ioc_class = ioc->ioc_mc;
+ do_gettimeofday(&tv);
+	enable_req.tv_sec = htonl(tv.tv_sec);
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_getattr_req attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
+void
+bfa_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = readl(ioc->ioc_regs.heartbeat);
+	if (ioc->hb_count == hb_count) {
+		pr_crit("Firmware heartbeat failure at %d\n", hb_count);
+		bfa_ioc_recover(ioc);
+		return;
+	}
+
+	ioc->hb_count = hb_count;
+
+ bfa_ioc_mbox_poll(ioc);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
+{
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_stop(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->hb_timer);
+}
+
+/**
+ * @brief
+ * Initiate a full firmware download.
+ */
+static void
+bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param)
+{
+ u32 *fwimg;
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+
+ /**
+ * Initialize LMEM first before code download
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ /**
+ * Flash based firmware boot
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ boot_type = BFI_BOOT_TYPE_FLASH;
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ /**
+ * write smem
+ */
+ writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
+ ((ioc->ioc_regs.smem_page_start) + (loff)));
+
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum,
+ ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(bfa_ioc_smem_pgnum(ioc, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+	/*
+	 * Set boot type and boot param at the end.
+	 */
+	writel(boot_type, ioc->ioc_regs.smem_page_start +
+			BFI_BOOT_TYPE_OFF);
+	writel(boot_param, ioc->ioc_regs.smem_page_start +
+			BFI_BOOT_PARAM_OFF);
+}
+
+static void
+bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
+{
+ bfa_ioc_hwinit(ioc, force);
+}
+
+/**
+ * @brief
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_attr *attr = ioc->attr;
+
+ attr->adapter_prop = ntohl(attr->adapter_prop);
+ attr->card_type = ntohl(attr->card_type);
+ attr->maxfrsize = ntohs(attr->maxfrsize);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/**
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/**
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+ u32 stat;
+
+ /**
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /**
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /**
+	 * Send the next queued command to firmware.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/**
+ * IOC public
+ */
+enum bfa_status
+bfa_ioc_pll_init(struct bfa_ioc *ioc)
+{
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_ioc_pll_init_asic(ioc);
+
+ ioc->pllinit = true;
+ /*
+ * release semaphore.
+ */
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+void
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+{
+ void __iomem *rb;
+
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return;
+
+ /**
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ rb = ioc->pcidev.pci_bar_kva;
+ if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+ } else {
+ writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+ }
+
+ bfa_ioc_msgflush(ioc);
+ bfa_ioc_download_fw(ioc, boot_type, boot_param);
+
+ /**
+ * Enable interrupts just before starting LPU
+ */
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_lpu_start(ioc);
+}
+
+/**
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_ioc_auto_recover(bool auto_recover)
+{
+ bfa_auto_recover = auto_recover;
+}
+
+bool
+bfa_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+bool
+bfa_ioc_is_initialized(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+
+ return ((r32 != BFI_IOC_UNINIT) &&
+ (r32 != BFI_IOC_INITING) &&
+ (r32 != BFI_IOC_MEMTEST));
+}
+
+void
+bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+{
+ u32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ /**
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+		msgp[i] = ntohl(r32);
+ }
+
+ /**
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+void
+bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+
+ msg = (union bfi_ioc_i2h_msg_u *) m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_READY_EVENT:
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ */
+void
+bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->fcmode = false;
+ ioc->pllinit = false;
+ ioc->dbg_fwsave_once = true;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->hb_notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+}
+
+/**
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_ioc_detach(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+}
+
+/**
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc)
+{
+ ioc->ioc_mc = mc;
+ ioc->pcidev = *pcidev;
+ ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
+ ioc->cna = ioc->ctdev && !ioc->fcmode;
+
+ bfa_ioc_set_ct_hwif(ioc);
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/**
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /**
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr *) dm_kva;
+}
+
+/**
+ * Return size of dma memory required.
+ */
+u32
+bfa_ioc_meminfo(void)
+{
+ return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ioc_enable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = true;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_ioc_disable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+u32
+bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+}
+
+u32
+bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGOFF(fmaddr);
+}
+
+/**
+ * Register mailbox message handler functions
+ *
+ * @param[in] ioc IOC instance
+ * @param[in] mcfuncs message class handler functions
+ */
+void
+bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ int mc;
+
+ for (mc = 0; mc < BFI_MC_MAX; mc++)
+ mod->mbhdlr[mc].cbfn = mcfuncs[mc];
+}
+
+/**
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/**
+ * Queue a mailbox command request to firmware. The request is queued
+ * internally if the mailbox is busy. It is the caller's responsibility
+ * to serialize requests.
+ *
+ * @param[in]	ioc	IOC instance
+ * @param[in]	cmd	Mailbox command
+ */
+void
+bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ /**
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
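+
+/*
+ * Illustrative usage sketch (not part of the driver); the message class,
+ * opcode and command storage below are hypothetical. The bfa_mbox_cmd
+ * must stay valid until the poll timer sends it, so it is driver-owned
+ * rather than stack-allocated:
+ *
+ *	struct bfi_mhdr *mh = (struct bfi_mhdr *)my_dev->cmd.msg;
+ *
+ *	bfi_h2i_set(*mh, BFI_MC_ENET, MY_H2I_OPCODE, bfa_ioc_portid(ioc));
+ *	bfa_ioc_mbox_queue(ioc, &my_dev->cmd);
+ */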
+
+/**
+ * Handle mailbox interrupts
+ */
+void
+bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg m;
+ int mc;
+
+ bfa_ioc_msgget(ioc, &m);
+
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+}
+
+void
+bfa_ioc_error_isr(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+void
+bfa_ioc_set_fcmode(struct bfa_ioc *ioc)
+{
+ ioc->fcmode = true;
+ ioc->port_id = bfa_ioc_pcifn(ioc);
+}
+
+/**
+ * return true if IOC is disabled
+ */
+bool
+bfa_ioc_is_disabled(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/**
+ * return true if IOC firmware is different.
+ */
+bool
+bfa_ioc_fw_mismatch(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
+}
+
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
+/**
+ * Check if adapter is disabled -- both IOCs should be in a disabled
+ * state.
+ */
+bool
+bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc)
+{
+ u32 ioc_state;
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+
+ if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
+ return false;
+
+ ioc_state = readl(rb + BFA_IOC0_STATE_REG);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return false;
+
+ if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) {
+ ioc_state = readl(rb + BFA_IOC1_STATE_REG);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Add to IOC heartbeat failure notification queue. To be used by common
+ * modules such as cee, port, diag.
+ */
+void
+bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify)
+{
+	list_add_tail(&notify->qe, &ioc->hb_notify_q);
+}
+
+#define BFA_MFG_NAME "Brocade"
+void
+bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ struct bfa_adapter_attr *ad_attr)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ ioc_attr = ioc->attr;
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd));
+
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+ ad_attr->card_type = ioc_attr->card_type;
+ ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+ ad_attr->mac = bfa_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+
+ ad_attr->cna_capable = ioc->cna;
+ ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
+}
+
+enum bfa_ioc_type
+bfa_ioc_get_type(struct bfa_ioc *ioc)
+{
+ if (!ioc->ctdev || ioc->fcmode)
+ return BFA_IOC_TYPE_FC;
+ else if (ioc->ioc_mc == BFI_MC_IOCFC)
+ return BFA_IOC_TYPE_FCoE;
+ else if (ioc->ioc_mc == BFI_MC_LL)
+ return BFA_IOC_TYPE_LL;
+ else {
+		BUG_ON(ioc->ioc_mc != BFI_MC_LL);
+ return BFA_IOC_TYPE_LL;
+ }
+}
+
+void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+{
+ memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy(serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
+{
+ memset(fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+{
+	BUG_ON(!chip_rev);
+
+ memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+ chip_rev[0] = 'R';
+ chip_rev[1] = 'e';
+ chip_rev[2] = 'v';
+ chip_rev[3] = '-';
+ chip_rev[4] = ioc->attr->asic_rev;
+ chip_rev[5] = '\0';
+}
+
+void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+{
+ memset(optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+{
+ memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+void
+bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+	BUG_ON(!model);
+ memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ /**
+ * model name
+ */
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ BFA_MFG_NAME, ioc_attr->card_type);
+}
+
+enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc *ioc)
+{
+ return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+}
+
+void
+bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+{
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
+
+ ioc_attr->state = bfa_ioc_get_state(ioc);
+ ioc_attr->port_id = ioc->port_id;
+
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
+ ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+/**
+ * WWN public
+ */
+u64
+bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->pwwn;
+}
+
+u64
+bfa_ioc_get_nwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->nwwn;
+}
+
+u64
+bfa_ioc_get_adid(struct bfa_ioc *ioc)
+{
+ return ioc->attr->mfg_pwwn;
+}
+
+mac_t
+bfa_ioc_get_mac(struct bfa_ioc *ioc)
+{
+ /*
+ * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+ */
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+ return bfa_ioc_get_mfg_mac(ioc);
+ else
+ return ioc->attr->mac;
+}
+
+u64
+bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->mfg_pwwn;
+}
+
+u64
+bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->mfg_nwwn;
+}
+
+mac_t
+bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+{
+ mac_t m;
+
+ m = ioc->attr->mfg_mac;
+ if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+ m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+ else
+ bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+ bfa_ioc_pcifn(ioc));
+
+ return m;
+}
+
+bool
+bfa_ioc_get_fcmode(struct bfa_ioc *ioc)
+{
+ return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
+}
+
+/**
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+static void
+bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
+{
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
+ return;
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfa_sm.h"
+#include "bfi.h"
+#include "cna.h"
+
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+
+/**
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge {
+ u32 sg_len;
+ void *sg_addr;
+};
+
+/**
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ void __iomem *pci_bar_kva;
+};
+
+/**
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma {
+ void *kva; /* ! Kernel virtual address */
+ u64 pa; /* ! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+
+/**
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
+#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
+
+/**
+ * @brief BFA dma address assignment macro
+ */
+#define bfa_dma_addr_set(dma_addr, pa) \
+ __bfa_dma_addr_set(&dma_addr, (u64)pa)
+
+static inline void
+__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) pa;
+ dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
+}
+
+/**
+ * @brief BFA dma address assignment macro. (big endian format)
+ */
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) htonl(pa);
+ dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
+}
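+
+/*
+ * Example (illustrative): for pa = 0x0000000112345678ULL,
+ * bfa_dma_be_addr_set() stores addr_lo = htonl(0x12345678) and
+ * addr_hi = htonl(0x00000001), so firmware sees both words in big
+ * endian format, as in bfa_ioc_send_getattr().
+ */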
+
+struct bfa_ioc_regs {
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *err_set;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
+ u32 smem_pg0;
+};
+
+/**
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd {
+ struct list_head qe;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
+struct bfa_ioc_mbox_mod {
+ struct list_head cmd_q; /*!< pending mbox queue */
+ int nmclass; /*!< number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/**
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/**
+ * Heartbeat failure notification queue element.
+ */
+struct bfa_ioc_hbfail_notify {
+ struct list_head qe;
+ bfa_ioc_hbfail_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
+ * Initialize a heartbeat failure notification structure
+ */
+#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
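+
+/*
+ * Usage sketch (illustrative; my_hbfail_cb and my_cbarg are
+ * hypothetical):
+ *
+ *	struct bfa_ioc_hbfail_notify notify;
+ *
+ *	bfa_ioc_hbfail_init(&notify, my_hbfail_cb, my_cbarg);
+ *	bfa_ioc_hbfail_register(ioc, &notify);
+ */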
+
+struct bfa_ioc {
+ bfa_fsm_t fsm;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct bfa_timer_mod *timer_mod;
+ struct timer_list ioc_timer;
+ struct timer_list sem_timer;
+ struct timer_list hb_timer;
+ u32 hb_count;
+ u32 retry_count;
+ struct list_head hb_notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bool dbg_fwsave_once;
+ enum bfi_mclass ioc_mc;
+ struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_drv_stats stats;
+ bool auto_recover;
+ bool fcmode;
+ bool ctdev;
+ bool cna;
+ bool pllinit;
+ bool stats_busy; /*!< outstanding stats */
+ u8 port_id;
+
+ struct bfa_dma attr_dma;
+ struct bfi_ioc_attr *attr;
+ struct bfa_ioc_cbfn *cbfn;
+ struct bfa_ioc_mbox_mod mbox_mod;
+ struct bfa_ioc_hwif *ioc_hwif;
+};
+
+struct bfa_ioc_hwif {
+ enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+ bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc *ioc);
+ void (*ioc_map_port) (struct bfa_ioc *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
+ bool msix);
+ void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+};
+
+#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ (((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc) \
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc) \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_FWIMG_TYPE(__ioc) \
+ (((__ioc)->ctdev) ? \
+ (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
+ BFI_IMAGE_CB_FC)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
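+
+/*
+ * Worked example (illustrative): with BFI_FLASH_CHUNK_SZ = 256 bytes,
+ * BFI_FLASH_CHUNK_SZ_WORDS is 64. A firmware word offset of 100 then
+ * maps to chunk 1 (100 / 64) at in-chunk offset 36 (100 % 64), and that
+ * chunk starts at word address 64.
+ */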
+
+/**
+ * IOC mailbox interface
+ */
+void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+void bfa_ioc_mbox_register(struct bfa_ioc *ioc,
+ bfa_ioc_mbox_mcfunc_t *mcfuncs);
+void bfa_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len);
+void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg);
+void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/**
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ (__ioc)->fcmode))
+
+enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc);
+enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode);
+enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+
+void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+ struct bfa_ioc_cbfn *cbfn);
+void bfa_ioc_auto_recover(bool auto_recover);
+void bfa_ioc_detach(struct bfa_ioc *ioc);
+void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc);
+u32 bfa_ioc_meminfo(void);
+void bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_ioc_enable(struct bfa_ioc *ioc);
+void bfa_ioc_disable(struct bfa_ioc *ioc);
+bool bfa_ioc_intx_claim(struct bfa_ioc *ioc);
+
+void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param);
+void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg);
+void bfa_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_ioc_is_operational(struct bfa_ioc *ioc);
+bool bfa_ioc_is_initialized(struct bfa_ioc *ioc);
+bool bfa_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc);
+bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc);
+void bfa_ioc_cfg_complete(struct bfa_ioc *ioc);
+enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc);
+void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num);
+void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
+void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver);
+void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+ char *manufacturer);
+void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
+enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc);
+
+void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ struct bfa_adapter_attr *ad_attr);
+u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
+void bfa_ioc_set_fcmode(struct bfa_ioc *ioc);
+bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc);
+void bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify);
+bool bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_sem_release(void __iomem *sem_reg);
+void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_ioc_fwver_get(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+
+/*
+ * Timeout APIs
+ */
+void bfa_ioc_timeout(void *ioc);
+void bfa_ioc_hb_check(void *ioc);
+void bfa_ioc_sem_timeout(void *ioc);
+
+/*
+ * bfa mfg wwn API functions
+ */
+u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc);
+mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc);
+u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc);
+u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc);
+mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+u64 bfa_ioc_get_adid(struct bfa_ioc *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+u32 *bfa_cb_image_get_chunk(int type, u32 off);
+u32 bfa_cb_image_get_size(int type);
+
+#endif /* __BFA_IOC_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/*
+ * forward declarations
+ */
+static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+
+static struct bfa_ioc_hwif hwif_ct;
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+{
+ hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+ hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+ hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+ hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+ hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+ hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+
+ ioc->ioc_hwif = &hwif_ct;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bool
+bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr fwhdr;
+
+ /**
+ * Firmware match check is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return true;
+
+ /**
+ * If bios boot (flash based) -- do not increment usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return true;
+
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+	 * If the usage count is 0, claim the firmware and return true.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * The use count cannot be non-zero while the chip is in the
+	 * uninitialized state.
+	 */
+	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return false;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * Firmware lock is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return;
+
+ /**
+ * If bios boot (flash based) -- do not decrement usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+	BUG_ON(usecnt == 0);
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ } else {
+ writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
+ }
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
+ { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
+ { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if (!msix && mode)
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+
+ writel(r32, rb + FNC_PERS_REG);
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ }
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ bfa_ioc_hw_sem_release(ioc);
+}
+
+enum bfa_status
+bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+
+ pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
+ __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
+ __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(1U);
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL,
+ (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1,
+ (rb + ETH_MAC_SER_REG));
+ }
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET,
+ rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk |
+ __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_sm.h State machine defines
+ */
+
+#ifndef __BFA_SM_H__
+#define __BFA_SM_H__
+
+#include "cna.h"
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm) ((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+
+/**
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table {
+ bfa_sm_t sm; /*!< state machine function */
+ int state; /*!< state machine encoding */
+ char *name; /*!< state name for display */
+};
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+
+/**
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc
+ * etype - object type, eg. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
+ static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
+} while (0)
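+
+/*
+ * For example (illustrative), bfa_fsm_set_state(ioc, bfa_ioc_sm_reset)
+ * assigns the state function and then runs its entry action:
+ *
+ *	ioc->fsm = (bfa_fsm_t)bfa_ioc_sm_reset;
+ *	bfa_ioc_sm_reset_entry(ioc);
+ */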
+
+#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state) \
+ ((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+#endif /* __BFA_SM_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_wc.h Generic wait counter.
+ */
+
+#ifndef __BFA_WC_H__
+#define __BFA_WC_H__
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
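+
+/*
+ * Usage sketch (illustrative; start_sub_op() and my_resume_fn are
+ * hypothetical). The initial bfa_wc_up() in bfa_wc_init() is balanced
+ * by the bfa_wc_down() in bfa_wc_wait(), so my_resume_fn runs only
+ * after every outstanding sub-operation has completed:
+ *
+ *	bfa_wc_init(&wc, my_resume_fn, cbarg);
+ *	bfa_wc_up(&wc);
+ *	start_sub_op();		(completion path calls bfa_wc_down(&wc))
+ *	bfa_wc_wait(&wc);
+ */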
+
+#endif /* __BFA_WC_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+
+#pragma pack(1)
+
+/**
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+enum {
+ BFI_IMAGE_CB_FC,
+ BFI_IMAGE_CT_FC,
+ BFI_IMAGE_CT_CNA,
+ BFI_IMAGE_MAX,
+};
+
+/**
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr {
+ u8 msg_class; /*!< @ref enum bfi_mclass */
+ u8 msg_id; /*!< msg opcode with in the class */
+ union {
+ struct {
+ u8 rsvd;
+ u8 lpu_id; /*!< msg destination */
+ } h2i;
+ u16 i2htok; /*!< token in msgs to host */
+ } mtag;
+};
+
+#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.lpu_id = (_lpuid); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
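+
+/*
+ * Example (illustrative): bfa_ioc_send_enable() builds its request
+ * header with
+ *
+ *	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ *		bfa_ioc_portid(ioc));
+ *
+ * which fills in msg_class, msg_id and the destination lpu_id.
+ */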
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
+
+/**
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+#define BFI_SGE_INLINE 1
+#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
+
+/**
+ * SG Flags
+ */
+enum {
+ BFI_SGE_DATA = 0, /*!< data address, not last */
+ BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
+ BFI_SGE_DATA_LAST = 3, /*!< data address, last */
+ BFI_SGE_LINK = 2, /*!< link address */
+ BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
+};
+
+/**
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ u32 addr_lo;
+ u32 addr_hi;
+ } a32;
+};
+
+/**
+ * Scatter Gather Element
+ */
+struct bfi_sge {
+#ifdef __BIGENDIAN
+ u32 flags:2,
+ rsvd:2,
+ sg_len:28;
+#else
+ u32 sg_len:28,
+ rsvd:2,
+ flags:2;
+#endif
+ union bfi_addr_u sga;
+};
+
+/**
+ * Scatter Gather Page
+ */
+#define BFI_SGPG_DATA_SGES 7
+#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
+#define BFI_SGPG_RSVD_WD_LEN 8
+struct bfi_sgpg {
+ struct bfi_sge sges[BFI_SGPG_SGES_MAX];
+ u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
+};
+
+/*
+ * Large Message structure - 128 Bytes size Msgs
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
+
+struct bfi_msg {
+ struct bfi_mhdr mhdr;
+ u32 pl[BFI_LMSG_PL_WSZ];
+};
+
+/**
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg {
+ struct bfi_mhdr mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/**
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /*!< Flash message class */
+ BFI_MC_CEE = 4, /*!< CEE */
+ BFI_MC_FCPORT = 5, /*!< FC port */
+ BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
+ BFI_MC_LL = 7, /*!< Link Layer */
+ BFI_MC_UF = 8, /*!< Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /*!< FC Transport */
+ BFI_MC_LPS = 10, /*!< lport fc login services */
+ BFI_MC_RPORT = 11, /*!< Remote port */
+ BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
+ BFI_MC_TSKIM = 18, /*!< Initiator Task management */
+ BFI_MC_SBOOT = 19, /*!< SAN boot services */
+ BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
+ BFI_MC_PORT = 21, /*!< Physical port */
+ BFI_MC_SFP = 22, /*!< SFP module */
+ BFI_MC_MSGQ = 23, /*!< MSGQ */
+ BFI_MC_ENET = 24, /*!< ENET commands/responses */
+ BFI_MC_MAX = 32
+};
+
+#define BFI_IOC_MAX_CQS 4
+#define BFI_IOC_MAX_CQS_ASIC 8
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+#define BFI_BOOT_TYPE_OFF 8
+#define BFI_BOOT_PARAM_OFF 12
+
+#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_FLASH 1
+#define BFI_BOOT_TYPE_MEMTEST 2
+
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
+
+/**
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
+};
+
+/**
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u attr_addr;
+};
+
+struct bfi_ioc_attr {
+ u64 mfg_pwwn; /*!< Mfg port wwn */
+ u64 mfg_nwwn; /*!< Mfg node wwn */
+ mac_t mfg_mac; /*!< Mfg mac */
+ u16 rsvd_a;
+ u64 pwwn;
+ u64 nwwn;
+ mac_t mac; /*!< PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /*!< receive buffer credits */
+ u32 adapter_prop; /*!< adapter properties */
+ u16 maxfrsize; /*!< max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd vpd;
+ u32 card_type; /*!< card type */
+};
+
+/**
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< cfg reply status */
+ u8 rsvd[3];
+};
+
+/**
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/**
+ * Firmware statistics offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/**
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_MD5SUM_SZ 4
+struct bfi_ioc_image_hdr {
+ u32 signature; /*!< constant signature */
+ u32 rsvd_a;
+ u32 exec; /*!< exec vector */
+ u32 param; /*!< parameters */
+ u32 rsvd_b[4];
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
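+
+/*
+ * Illustrative sketch (the helper name is hypothetical): an image
+ * header is sanity-checked against the constant signature above
+ * before the rest of it is trusted.
+ *
+ *	static inline bool bfi_image_hdr_valid(
+ *			const struct bfi_ioc_image_hdr *hdr)
+ *	{
+ *		return hdr->signature == BFI_IOC_FW_SIGNATURE;
+ *	}
+ */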
+
+/**
+ * BFI_IOC_I2H_READY_EVENT message
+ */
+struct bfi_ioc_rdy_event {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 init_status; /*!< init event status */
+ u8 rsvd[3];
+};
+
+struct bfi_ioc_hbeat {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 hb_count; /*!< current heart beat count */
+};
+
+/**
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /*!< not initialized */
+ BFI_IOC_INITING = 1, /*!< h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
+ BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
+ BFI_IOC_OP = 4, /*!< IOC is operational */
+ BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
+	BFI_IOC_CFG_DISABLED = 7,	/*!< IOC is being disabled; transient */
+ BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
+};
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
+	BFI_ADAPTER_PROTO	= 0x100000,	/*!< prototype adapters */
+ BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
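+
+/*
+ * Example (illustrative only): GETP/SETP paste the property name into
+ * the _MK/_SH definitions above, so the port count and speed can be
+ * pulled out of (or packed into) the adapter_prop word of
+ * struct bfi_ioc_attr:
+ *
+ *	u32 nports = BFI_ADAPTER_GETP(NPORTS, attr->adapter_prop);
+ *	u32 prop = BFI_ADAPTER_SETP(NPORTS, 2) |
+ *		   BFI_ADAPTER_SETP(SPEED, 10);
+ */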
+
+/**
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req {
+ struct bfi_mhdr mh;
+ u8 ioc_class;
+ u8 rsvd[3];
+ u32 tv_sec;
+};
+
+/**
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< enable/disable status */
+ u8 rsvd[3];
+};
+
+#define BFI_IOC_MSGSZ 8
+/**
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_req enable_req;
+ struct bfi_ioc_ctrl_req disable_req;
+ struct bfi_ioc_getattr_req getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_rdy_event rdy_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+#pragma pack()
+
+#endif /* __BFI_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_CNA_H__
+#define __BFI_CNA_H__
+
+#include "bfi.h"
+#include "bfa_defs_cna.h"
+
+#pragma pack(1)
+
+enum bfi_port_h2i {
+ BFI_PORT_H2I_ENABLE_REQ = (1),
+ BFI_PORT_H2I_DISABLE_REQ = (2),
+ BFI_PORT_H2I_GET_STATS_REQ = (3),
+ BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
+};
+
+enum bfi_port_i2h {
+ BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
+ BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
+};
+
+/**
+ * Generic REQ type
+ */
+struct bfi_port_generic_req {
+ struct bfi_mhdr mh; /*!< msg header */
+ u32 msgtag; /*!< msgtag for reply */
+ u32 rsvd;
+};
+
+/**
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 status; /*!< port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /*!< msgtag for reply */
+};
+
+/**
+ * @todo
+ * BFI_PORT_H2I_ENABLE_REQ
+ */
+
+/**
+ * @todo
+ * BFI_PORT_I2H_ENABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_DISABLE_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_DISABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ union bfi_addr_u dma_addr;
+};
+
+/**
+ * BFI_PORT_I2H_GET_STATS_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_CLEAR_STATS_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_CLEAR_STATS_RSP
+ */
+
+union bfi_port_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_req enable_req;
+ struct bfi_port_generic_req disable_req;
+ struct bfi_port_get_stats_req getstats_req;
+ struct bfi_port_generic_req clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_rsp enable_rsp;
+ struct bfi_port_generic_rsp disable_rsp;
+ struct bfi_port_generic_rsp getstats_rsp;
+ struct bfi_port_generic_rsp clearstats_rsp;
+};
+
+/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
+enum bfi_cee_h2i_msgs {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
+enum bfi_cee_i2h_msgs {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/* Data structures */
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_lldp_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_cee_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_CFG_REQ
+ */
+struct bfi_cee_get_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_CFG_RSP
+ */
+struct bfi_cee_get_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_STATS_REQ
+ */
+struct bfi_cee_stats_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_STATS_RSP
+ */
+struct bfi_cee_stats_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* @brief mailbox command structures from host to firmware */
+union bfi_cee_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_req get_req;
+ struct bfi_cee_stats_req stats_req;
+};
+
+/* @brief mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_rsp get_rsp;
+ struct bfi_cee_stats_rsp stats_rsp;
+};
+
+#pragma pack()
+
+#endif /* __BFI_CNA_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/*
+ * bfi_ctreg.h catapult host block register definitions
+ *
+ * !!! Do not edit. Auto generated. !!!
+ */
+
+#ifndef __BFI_CTREG_H__
+#define __BFI_CTREG_H__
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200
+#define HOSTFN1_LPU_MBOX0_8 0x00019260
+#define LPU_HOSTFN0_MBOX0_0 0x00019280
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0
+#define HOSTFN2_LPU_MBOX0_0 0x00019400
+#define HOSTFN3_LPU_MBOX0_8 0x00019460
+#define LPU_HOSTFN2_MBOX0_0 0x00019480
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0
+#define HOSTFN0_INT_STATUS 0x00014000
+#define __HOSTFN0_HALT_OCCURRED 0x01000000
+#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN0_INT_STATUS_LVL_SH 20
+#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
+#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN0_INT_STATUS_P_SH 16
+#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
+#define __HOSTFN0_INT_STATUS_F 0x0000ffff
+#define HOSTFN0_INT_MSK 0x00014004
+#define HOST_PAGE_NUM_FN0 0x00014008
+#define __HOST_PAGE_NUM_FN 0x000001ff
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
+#define __MSIX_ERR_INDEX_FN 0x000001ff
+#define HOSTFN1_INT_STATUS 0x00014100
+#define __HOSTFN1_HALT_OCCURRED 0x01000000
+#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN1_INT_STATUS_LVL_SH 20
+#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
+#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN1_INT_STATUS_P_SH 16
+#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
+#define __HOSTFN1_INT_STATUS_F 0x0000ffff
+#define HOSTFN1_INT_MSK 0x00014104
+#define HOST_PAGE_NUM_FN1 0x00014108
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
+#define APP_PLL_425_CTL_REG 0x00014204
+#define __P_425_PLL_LOCK 0x80000000
+#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_425_RESET_TIMER_SH 17
+#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
+#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_425_CNTLMT0_1_SH 14
+#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
+#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_425_JITLMT0_1_SH 12
+#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
+#define __APP_PLL_425_HREF 0x00000800
+#define __APP_PLL_425_HDIV 0x00000400
+#define __APP_PLL_425_P0_1_MK 0x00000300
+#define __APP_PLL_425_P0_1_SH 8
+#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
+#define __APP_PLL_425_Z0_2_MK 0x000000e0
+#define __APP_PLL_425_Z0_2_SH 5
+#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
+#define __APP_PLL_425_RSEL200500 0x00000010
+#define __APP_PLL_425_ENARST 0x00000008
+#define __APP_PLL_425_BYPASS 0x00000004
+#define __APP_PLL_425_LRESETN 0x00000002
+#define __APP_PLL_425_ENABLE 0x00000001
+#define APP_PLL_312_CTL_REG 0x00014208
+#define __P_312_PLL_LOCK 0x80000000
+#define __ENABLE_MAC_AHB_1 0x00800000
+#define __ENABLE_MAC_AHB_0 0x00400000
+#define __ENABLE_MAC_1 0x00200000
+#define __ENABLE_MAC_0 0x00100000
+#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_312_RESET_TIMER_SH 17
+#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
+#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_312_CNTLMT0_1_SH 14
+#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
+#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_312_JITLMT0_1_SH 12
+#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
+#define __APP_PLL_312_HREF 0x00000800
+#define __APP_PLL_312_HDIV 0x00000400
+#define __APP_PLL_312_P0_1_MK 0x00000300
+#define __APP_PLL_312_P0_1_SH 8
+#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
+#define __APP_PLL_312_Z0_2_MK 0x000000e0
+#define __APP_PLL_312_Z0_2_SH 5
+#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
+#define __APP_PLL_312_RSEL200500 0x00000010
+#define __APP_PLL_312_ENARST 0x00000008
+#define __APP_PLL_312_BYPASS 0x00000004
+#define __APP_PLL_312_LRESETN 0x00000002
+#define __APP_PLL_312_ENABLE 0x00000001
+#define MBIST_CTL_REG 0x00014220
+#define __EDRAM_BISTR_START 0x00000004
+#define __MBIST_RESET 0x00000002
+#define __MBIST_START 0x00000001
+#define MBIST_STAT_REG 0x00014224
+#define __EDRAM_BISTR_STATUS 0x00000008
+#define __EDRAM_BISTR_DONE 0x00000004
+#define __MEM_BIT_STATUS 0x00000002
+#define __MBIST_DONE 0x00000001
+#define HOST_SEM0_REG 0x00014230
+#define __HOST_SEMAPHORE 0x00000001
+#define HOST_SEM1_REG 0x00014234
+#define HOST_SEM2_REG 0x00014238
+#define HOST_SEM3_REG 0x0001423c
+#define HOST_SEM0_INFO_REG 0x00014240
+#define HOST_SEM1_INFO_REG 0x00014244
+#define HOST_SEM2_INFO_REG 0x00014248
+#define HOST_SEM3_INFO_REG 0x0001424c
+#define ETH_MAC_SER_REG 0x00014288
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define HOSTFN2_INT_STATUS 0x00014300
+#define __HOSTFN2_HALT_OCCURRED 0x01000000
+#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN2_INT_STATUS_LVL_SH 20
+#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
+#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN2_INT_STATUS_P_SH 16
+#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
+#define __HOSTFN2_INT_STATUS_F 0x0000ffff
+#define HOSTFN2_INT_MSK 0x00014304
+#define HOST_PAGE_NUM_FN2 0x00014308
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
+#define HOSTFN3_INT_STATUS 0x00014400
+#define __HALT_OCCURRED 0x01000000
+#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN3_INT_STATUS_LVL_SH 20
+#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
+#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN3_INT_STATUS_P_SH 16
+#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
+#define __HOSTFN3_INT_STATUS_F 0x0000ffff
+#define HOSTFN3_INT_MSK 0x00014404
+#define HOST_PAGE_NUM_FN3 0x00014408
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
+#define FNC_ID_REG 0x00014600
+#define __FUNCTION_NUMBER 0x00000007
+#define FNC_PERS_REG 0x00014604
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+#define OP_MODE 0x0001460c
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define HOST_SEM4_REG 0x00014610
+#define HOST_SEM5_REG 0x00014614
+#define HOST_SEM6_REG 0x00014618
+#define HOST_SEM7_REG 0x0001461c
+#define HOST_SEM4_INFO_REG 0x00014620
+#define HOST_SEM5_INFO_REG 0x00014624
+#define HOST_SEM6_INFO_REG 0x00014628
+#define HOST_SEM7_INFO_REG 0x0001462c
+#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
+#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
+#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
+#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
+#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
+#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
+#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
+#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
+#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
+#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
+#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
+#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
+#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
+#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
+#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
+#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
+#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc
+#define CPE_PI_PTR_Q0 0x00038000
+#define __CPE_PI_UNUSED_MK 0xffff0000
+#define __CPE_PI_UNUSED_SH 16
+#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
+#define __CPE_PI_PTR 0x0000ffff
+#define CPE_PI_PTR_Q1 0x00038040
+#define CPE_CI_PTR_Q0 0x00038004
+#define __CPE_CI_UNUSED_MK 0xffff0000
+#define __CPE_CI_UNUSED_SH 16
+#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
+#define __CPE_CI_PTR 0x0000ffff
+#define CPE_CI_PTR_Q1 0x00038044
+#define CPE_DEPTH_Q0 0x00038008
+#define __CPE_DEPTH_UNUSED_MK 0xf8000000
+#define __CPE_DEPTH_UNUSED_SH 27
+#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
+#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __CPE_MSIX_VEC_INDEX_SH 16
+#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
+#define __CPE_DEPTH 0x0000ffff
+#define CPE_DEPTH_Q1 0x00038048
+#define CPE_QCTRL_Q0 0x0003800c
+#define __CPE_CTRL_UNUSED30_MK 0xfc000000
+#define __CPE_CTRL_UNUSED30_SH 26
+#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
+#define __CPE_FUNC_INT_CTRL_MK 0x03000000
+#define __CPE_FUNC_INT_CTRL_SH 24
+#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
+enum {
+ __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
+ __CPE_FUNC_INT_CTRL_F2NF = 0x1,
+ __CPE_FUNC_INT_CTRL_3QUART = 0x2,
+ __CPE_FUNC_INT_CTRL_HALF = 0x3,
+};
+#define __CPE_CTRL_UNUSED20_MK 0x00f00000
+#define __CPE_CTRL_UNUSED20_SH 20
+#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
+#define __CPE_SCI_TH_MK 0x000f0000
+#define __CPE_SCI_TH_SH 16
+#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
+#define __CPE_CTRL_UNUSED10_MK 0x0000c000
+#define __CPE_CTRL_UNUSED10_SH 14
+#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
+#define __CPE_ACK_PENDING 0x00002000
+#define __CPE_CTRL_UNUSED40_MK 0x00001c00
+#define __CPE_CTRL_UNUSED40_SH 10
+#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
+#define __CPE_PCIEID_MK 0x00000300
+#define __CPE_PCIEID_SH 8
+#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
+#define __CPE_CTRL_UNUSED00_MK 0x000000fe
+#define __CPE_CTRL_UNUSED00_SH 1
+#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
+#define __CPE_ESIZE 0x00000001
+#define CPE_QCTRL_Q1 0x0003804c
+#define __CPE_CTRL_UNUSED31_MK 0xfc000000
+#define __CPE_CTRL_UNUSED31_SH 26
+#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
+#define __CPE_CTRL_UNUSED21_MK 0x00f00000
+#define __CPE_CTRL_UNUSED21_SH 20
+#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
+#define __CPE_CTRL_UNUSED11_MK 0x0000c000
+#define __CPE_CTRL_UNUSED11_SH 14
+#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
+#define __CPE_CTRL_UNUSED41_MK 0x00001c00
+#define __CPE_CTRL_UNUSED41_SH 10
+#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
+#define __CPE_CTRL_UNUSED01_MK 0x000000fe
+#define __CPE_CTRL_UNUSED01_SH 1
+#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
+#define RME_PI_PTR_Q0 0x00038020
+#define __LATENCY_TIME_STAMP_MK 0xffff0000
+#define __LATENCY_TIME_STAMP_SH 16
+#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
+#define __RME_PI_PTR 0x0000ffff
+#define RME_PI_PTR_Q1 0x00038060
+#define RME_CI_PTR_Q0 0x00038024
+#define __DELAY_TIME_STAMP_MK 0xffff0000
+#define __DELAY_TIME_STAMP_SH 16
+#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
+#define __RME_CI_PTR 0x0000ffff
+#define RME_CI_PTR_Q1 0x00038064
+#define RME_DEPTH_Q0 0x00038028
+#define __RME_DEPTH_UNUSED_MK 0xf8000000
+#define __RME_DEPTH_UNUSED_SH 27
+#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
+#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __RME_MSIX_VEC_INDEX_SH 16
+#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
+#define __RME_DEPTH 0x0000ffff
+#define RME_DEPTH_Q1 0x00038068
+#define RME_QCTRL_Q0 0x0003802c
+#define __RME_INT_LATENCY_TIMER_MK 0xff000000
+#define __RME_INT_LATENCY_TIMER_SH 24
+#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
+#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
+#define __RME_INT_DELAY_TIMER_SH 16
+#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
+#define __RME_INT_DELAY_DISABLE 0x00008000
+#define __RME_DLY_DELAY_DISABLE 0x00004000
+#define __RME_ACK_PENDING 0x00002000
+#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
+#define __RME_CTRL_UNUSED10_MK 0x00000c00
+#define __RME_CTRL_UNUSED10_SH 10
+#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
+#define __RME_PCIEID_MK 0x00000300
+#define __RME_PCIEID_SH 8
+#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
+#define __RME_CTRL_UNUSED00_MK 0x000000fe
+#define __RME_CTRL_UNUSED00_SH 1
+#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
+#define __RME_ESIZE 0x00000001
+#define RME_QCTRL_Q1 0x0003806c
+#define __RME_CTRL_UNUSED11_MK 0x00000c00
+#define __RME_CTRL_UNUSED11_SH 10
+#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
+#define __RME_CTRL_UNUSED01_MK 0x000000fe
+#define __RME_CTRL_UNUSED01_SH 1
+#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
+#define PSS_CTL_REG 0x00018800
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810
+#define __PSS_LPU1_TCM_READ_ERR 0x00200000
+#define __PSS_LPU0_TCM_READ_ERR 0x00100000
+#define __PSS_LMEM5_CORR_ERR 0x00080000
+#define __PSS_LMEM4_CORR_ERR 0x00040000
+#define __PSS_LMEM3_CORR_ERR 0x00020000
+#define __PSS_LMEM2_CORR_ERR 0x00010000
+#define __PSS_LMEM1_CORR_ERR 0x00008000
+#define __PSS_LMEM0_CORR_ERR 0x00004000
+#define __PSS_LMEM5_UNCORR_ERR 0x00002000
+#define __PSS_LMEM4_UNCORR_ERR 0x00001000
+#define __PSS_LMEM3_UNCORR_ERR 0x00000800
+#define __PSS_LMEM2_UNCORR_ERR 0x00000400
+#define __PSS_LMEM1_UNCORR_ERR 0x00000200
+#define __PSS_LMEM0_UNCORR_ERR 0x00000100
+#define __PSS_BAL_PERR 0x00000080
+#define __PSS_DIP_IF_ERR 0x00000040
+#define __PSS_IOH_IF_ERR 0x00000020
+#define __PSS_TDS_IF_ERR 0x00000010
+#define __PSS_RDS_IF_ERR 0x00000008
+#define __PSS_SGM_IF_ERR 0x00000004
+#define __PSS_LPU1_RAM_ERR 0x00000002
+#define __PSS_LPU0_RAM_ERR 0x00000001
+#define ERR_SET_REG 0x00018818
+#define __PSS_ERR_STATUS_SET 0x003fffff
+#define PMM_1T_RESET_REG_P0 0x0002381c
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c
+#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
+#define __RXQ0_ADD_VECTORS_P 0x80000000
+#define __RXQ0_STOP_P 0x40000000
+#define __RXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
+#define __RXQ1_ADD_VECTORS_P 0x80000000
+#define __RXQ1_STOP_P 0x40000000
+#define __RXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
+#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
+#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
+#define __TXQ0_ADD_VECTORS_P 0x80000000
+#define __TXQ0_STOP_P 0x40000000
+#define __TXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
+#define __TXQ1_ADD_VECTORS_P 0x80000000
+#define __TXQ1_STOP_P 0x40000000
+#define __TXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
+#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
+#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
+#define __IB1_0_ACK_P 0x80000000
+#define __IB1_0_DISABLE_P 0x40000000
+#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_0_COALESCING_CFG_P_SH 16
+#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
+#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
+#define __IB1_1_ACK_P 0x80000000
+#define __IB1_1_DISABLE_P 0x40000000
+#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_1_COALESCING_CFG_P_SH 16
+#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
+#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
+#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
+#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
+#define __IB2_0_ACK_P 0x80000000
+#define __IB2_0_DISABLE_P 0x40000000
+#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_0_COALESCING_CFG_P_SH 16
+#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
+#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
+#define __IB2_1_ACK_P 0x80000000
+#define __IB2_1_DISABLE_P 0x40000000
+#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_1_COALESCING_CFG_P_SH 16
+#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
+#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
+#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
+
+/*
+ * These definitions are either wrong or missing in the spec; they are
+ * auto-generated from hard-coded values in regparse.pl.
+ */
+#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
+#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
+#define __EMPHPRE_AT_4G_FIX 0x00000003
+#define __SFP_TXRATE_EN_FIX 0x00000100
+#define __SFP_RXRATE_EN_FIX 0x00000080
+
+/*
+ * These register definitions are auto-generated from hard-coded values
+ * in regparse.pl.
+ */
+
+/*
+ * These register mapping definitions are auto-generated from mapping tables
+ * in regparse.pl.
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+
+#define CPE_DEPTH_Q(__n) \
+ (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
+#define CPE_QCTRL_Q(__n) \
+ (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
+#define CPE_PI_PTR_Q(__n) \
+ (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
+#define CPE_CI_PTR_Q(__n) \
+ (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
+#define RME_DEPTH_Q(__n) \
+ (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
+#define RME_QCTRL_Q(__n) \
+ (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
+#define RME_PI_PTR_Q(__n) \
+ (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
+#define RME_CI_PTR_Q(__n) \
+ (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
+#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define CPE_Q_MASK(__q) ((__q) & 0x3)
+#define RME_Q_MASK(__q) ((__q) & 0x3)
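+
+/*
+ * Illustrative only: the stride-based helpers above map a queue index
+ * to its register, e.g. CPE_DEPTH_Q(1) == CPE_DEPTH_Q0 +
+ * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0) == 0x00038048. CPE_Q_NUM() packs a
+ * PCI function and a local queue number into a global queue id, and
+ * CPE_Q_MASK() recovers the local part:
+ *
+ *	int qid = CPE_Q_NUM(2, 3);	// function 2, queue 3 -> 11
+ *	int q = CPE_Q_MASK(qid);	// 3
+ */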
+
+/*
+ * PCI MSI-X vector defines
+ */
+enum {
+ BFA_MSIX_CPE_Q0 = 0,
+ BFA_MSIX_CPE_Q1 = 1,
+ BFA_MSIX_CPE_Q2 = 2,
+ BFA_MSIX_CPE_Q3 = 3,
+ BFA_MSIX_RME_Q0 = 4,
+ BFA_MSIX_RME_Q1 = 5,
+ BFA_MSIX_RME_Q2 = 6,
+ BFA_MSIX_RME_Q3 = 7,
+ BFA_MSIX_LPU_ERR = 8,
+ BFA_MSIX_CT_MAX = 9,
+};
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+
+/*
+ * catapult memory map.
+ */
+#define LL_PGN_HQM0 0x0096
+#define LL_PGN_HQM1 0x0097
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
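+
+/*
+ * Illustrative only: shared memory is windowed in 32KB (0x8000)
+ * pages, so a linear offset such as BFI_IOC_FWSTATS_OFF (0x6B40)
+ * resolves to the first page of the window:
+ *
+ *	u32 pg = PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x6B40);	// 0x8000
+ *	u32 off = PSS_SMEM_PGOFF(0x6B40);			// 0x6B40
+ */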
+
+/*
+ * End of catapult memory map
+ */
+
+#endif /* __BFI_CTREG_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_LL_H__
+#define __BFI_LL_H__
+
+#include "bfi.h"
+
+#pragma pack(1)
+
+/**
+ * @brief
+ * "enums" for all LL mailbox messages other than IOC
+ */
+enum {
+ BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
+ BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
+ BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
+
+ BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
+ BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
+ BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
+ BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
+
+ BFI_LL_H2I_PORT_ADMIN_REQ = 8,
+ BFI_LL_H2I_STATS_GET_REQ = 9,
+ BFI_LL_H2I_STATS_CLEAR_REQ = 10,
+
+ BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
+ BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
+
+ BFI_LL_H2I_TXQ_STOP_REQ = 13,
+ BFI_LL_H2I_RXQ_STOP_REQ = 14,
+
+ BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
+
+ BFI_LL_H2I_SET_PAUSE_REQ = 16,
+ BFI_LL_H2I_MTU_INFO_REQ = 17,
+
+ BFI_LL_H2I_RX_REQ = 18,
+};
+
+enum {
+ BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
+ BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
+ BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
+
+ BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
+ BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
+ BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
+ BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
+
+ BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
+ BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
+ BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
+
+ BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
+ BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
+
+ BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
+ BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
+
+ BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
+
+ BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
+
+ BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
+ BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
+
+ BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
+ BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
+
+ BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
+ BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
+};
+
+/**
+ * @brief bfi_ll_mac_addr_req is used by:
+ * BFI_LL_H2I_MAC_UCAST_SET_REQ
+ * BFI_LL_H2I_MAC_UCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_UCAST_DEL_REQ
+ * BFI_LL_H2I_MAC_MCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_MCAST_DEL_REQ
+ */
+struct bfi_ll_mac_addr_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd1[3];
+ mac_t mac_addr;
+ u8 rsvd2[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_filter_req is used by:
+ * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
+ */
+struct bfi_ll_mcast_filter_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_del_all_req is used by:
+ * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
+ */
+struct bfi_ll_mcast_del_all_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_q_stop_req is used by:
+ * BFI_LL_H2I_TXQ_STOP_REQ
+ * BFI_LL_H2I_RXQ_STOP_REQ
+ */
+struct bfi_ll_q_stop_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+	u32 q_id_mask[2];	/*!< bit-mask for queue ids */
+};
+
+/**
+ * @brief bfi_ll_stats_req is used by:
+ *	BFI_LL_H2I_STATS_GET_REQ
+ *	BFI_LL_H2I_STATS_CLEAR_REQ
+ */
+struct bfi_ll_stats_req {
+	struct bfi_mhdr mh;	/*!< common msg header */
+	u16 stats_mask;		/*!< bit-mask for non-function statistics */
+	u8 rsvd[2];
+	u32 rxf_id_mask[2];	/*!< bit-mask for RxF Statistics */
+	u32 txf_id_mask[2];	/*!< bit-mask for TxF Statistics */
+	union bfi_addr_u host_buffer;	/*!< where statistics are returned */
+};
+
+/**
+ * @brief defines for "stats_mask" above.
+ */
+#define BFI_LL_STATS_MAC	(1 << 0)	/*!< MAC Statistics */
+#define BFI_LL_STATS_BPC	(1 << 1)	/*!< Pause Stats from BPC */
+#define BFI_LL_STATS_RAD	(1 << 2)	/*!< Rx Admission Statistics */
+#define BFI_LL_STATS_RX_FC	(1 << 3)	/*!< Rx FC Stats from RxA */
+#define BFI_LL_STATS_TX_FC	(1 << 4)	/*!< Tx FC Stats from TxA */
+
+#define BFI_LL_STATS_ALL 0x1f
+
+/**
+ * @brief bfi_ll_port_admin_req
+ */
+struct bfi_ll_port_admin_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 up;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_rxf_req is used by:
+ * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
+ * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
+ */
+struct bfi_ll_rxf_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_rxf_multi_req is used by:
+ * BFI_LL_H2I_RX_REQ
+ */
+struct bfi_ll_rxf_multi_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 rxf_id_mask[2];
+ u8 enable;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief enum for Loopback opmodes
+ */
+enum {
+ BFI_LL_DIAG_LB_OPMODE_EXT = 0,
+ BFI_LL_DIAG_LB_OPMODE_CBL = 1,
+};
+
+/**
+ * @brief bfi_ll_set_pause_req is used by:
+ * BFI_LL_H2I_SET_PAUSE_REQ
+ */
+struct bfi_ll_set_pause_req {
+ struct bfi_mhdr mh;
+ u8 tx_pause; /* 1 = enable, 0 = disable */
+ u8 rx_pause; /* 1 = enable, 0 = disable */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mtu_info_req is used by:
+ * BFI_LL_H2I_MTU_INFO_REQ
+ */
+struct bfi_ll_mtu_info_req {
+ struct bfi_mhdr mh;
+ u16 mtu;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * Response header format, shared by all responses and
+ * asynchronous notifications
+ */
+struct bfi_ll_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 error;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_aen is used by:
+ * BFI_LL_I2H_LINK_DOWN_AEN
+ * BFI_LL_I2H_LINK_UP_AEN
+ */
+struct bfi_ll_aen {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 reason;
+ u8 cee_linkup;
+ u8 prio_map; /*!< LL priority bit-map */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * The following error codes can be returned
+ * by the mbox commands
+ */
+enum {
+ BFI_LL_CMD_OK = 0,
+ BFI_LL_CMD_FAIL = 1,
+	BFI_LL_CMD_DUP_ENTRY	= 2,	/*!< Duplicate entry in CAM */
+	BFI_LL_CMD_CAM_FULL	= 3,	/*!< CAM is full */
+	BFI_LL_CMD_NOT_OWNER	= 4,	/*!< Not permitted, because not owner */
+	BFI_LL_CMD_NOT_EXEC	= 5,	/*!< Was not sent to f/w at all */
+	BFI_LL_CMD_WAITING	= 6,	/*!< Waiting for completion (VMware) */
+	BFI_LL_CMD_PORT_DISABLED = 7,	/*!< Port in disabled state */
+};
+
+/* Statistics */
+#define BFI_LL_TXF_ID_MAX 64
+#define BFI_LL_RXF_ID_MAX 64
+
+/* TxF Frame Statistics */
+struct bfi_ll_stats_txf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+
+ u64 errors;
+ u64 filter_vlan; /* frames filtered due to VLAN */
+ u64 filter_mac_sa; /* frames filtered due to SA check */
+};
+
+/* RxF Frame Statistics */
+struct bfi_ll_stats_rxf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+ u64 frame_drops;
+};
+
+/* FC Tx Frame Statistics */
+struct bfi_ll_stats_fc_tx {
+ u64 txf_ucast_octets;
+ u64 txf_ucast;
+ u64 txf_ucast_vlan;
+
+ u64 txf_mcast_octets;
+ u64 txf_mcast;
+ u64 txf_mcast_vlan;
+
+ u64 txf_bcast_octets;
+ u64 txf_bcast;
+ u64 txf_bcast_vlan;
+
+ u64 txf_parity_errors;
+ u64 txf_timeout;
+ u64 txf_fid_parity_errors;
+};
+
+/* FC Rx Frame Statistics */
+struct bfi_ll_stats_fc_rx {
+ u64 rxf_ucast_octets;
+ u64 rxf_ucast;
+ u64 rxf_ucast_vlan;
+
+ u64 rxf_mcast_octets;
+ u64 rxf_mcast;
+ u64 rxf_mcast_vlan;
+
+ u64 rxf_bcast_octets;
+ u64 rxf_bcast;
+ u64 rxf_bcast_vlan;
+};
+
+/* RAD Frame Statistics */
+struct bfi_ll_stats_rad {
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_vlan_frames;
+
+ u64 rx_ucast;
+ u64 rx_ucast_octets;
+ u64 rx_ucast_vlan;
+
+ u64 rx_mcast;
+ u64 rx_mcast_octets;
+ u64 rx_mcast_vlan;
+
+ u64 rx_bcast;
+ u64 rx_bcast_octets;
+ u64 rx_bcast_vlan;
+
+ u64 rx_drops;
+};
+
+/* BPC Tx Registers */
+struct bfi_ll_stats_bpc {
+	/* transmit stats */
+	u64 tx_pause[8];
+	u64 tx_zero_pause[8];	/*!< Pause cancellation */
+	u64 tx_first_pause[8];	/*!< Pause initiation rather than retention */
+
+	/* receive stats */
+	u64 rx_pause[8];
+	u64 rx_zero_pause[8];	/*!< Pause cancellation */
+	u64 rx_first_pause[8];	/*!< Pause initiation rather than retention */
+};
+
+/* MAC Rx Statistics */
+struct bfi_ll_stats_mac {
+ u64 frame_64; /* both rx and tx counter */
+ u64 frame_65_127; /* both rx and tx counter */
+ u64 frame_128_255; /* both rx and tx counter */
+ u64 frame_256_511; /* both rx and tx counter */
+ u64 frame_512_1023; /* both rx and tx counter */
+ u64 frame_1024_1518; /* both rx and tx counter */
+ u64 frame_1519_1522; /* both rx and tx counter */
+
+ /* receive stats */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_fcs_error;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_control_frames;
+ u64 rx_pause;
+ u64 rx_unknown_opcode;
+ u64 rx_alignment_error;
+ u64 rx_frame_length_error;
+ u64 rx_code_error;
+ u64 rx_carrier_sense_error;
+ u64 rx_undersize;
+ u64 rx_oversize;
+ u64 rx_fragments;
+ u64 rx_jabber;
+ u64 rx_drop;
+
+ /* transmit stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_deferral;
+ u64 tx_excessive_deferral;
+ u64 tx_single_collision;
+ u64 tx_muliple_collision;
+ u64 tx_late_collision;
+ u64 tx_excessive_collision;
+ u64 tx_total_collision;
+ u64 tx_pause_honored;
+ u64 tx_drop;
+ u64 tx_jabber;
+ u64 tx_fcs_error;
+ u64 tx_control_frame;
+ u64 tx_oversize;
+ u64 tx_undersize;
+ u64 tx_fragments;
+};
+
+/* Complete statistics */
+struct bfi_ll_stats {
+ struct bfi_ll_stats_mac mac_stats;
+ struct bfi_ll_stats_bpc bpc_stats;
+ struct bfi_ll_stats_rad rad_stats;
+ struct bfi_ll_stats_fc_rx fc_rx_stats;
+ struct bfi_ll_stats_fc_tx fc_tx_stats;
+ struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
+ struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
+};
+
+#pragma pack()
+
+#endif /* __BFI_LL_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BNA_H__
+#define __BNA_H__
+
+#include "bfa_wc.h"
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi_ll.h"
+#include "bna_types.h"
+
+extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
+extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+
+/**
+ *
+ * Macros and constants
+ *
+ */
+
+#define BNA_IOC_TIMER_FREQ 200
+
+/* Log string size */
+#define BNA_MESSAGE_SIZE 256
+
+#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
+
+/* MBOX API for PORT, TX, RX */
+#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
+do { \
+ memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
+ (_qe)->cbfn = (_cbfn); \
+ (_qe)->cbarg = (_cbarg); \
+} while (0)
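+
+/*
+ * Illustrative sketch of queueing an LL command (assumes the
+ * bfi_h2i_set() header helper from bfi.h; qe, bna and the callback
+ * names are hypothetical locals):
+ *
+ *	struct bna_mbox_qe qe;
+ *	struct bfi_ll_set_pause_req req;
+ *
+ *	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_SET_PAUSE_REQ, 0);
+ *	req.tx_pause = 1;
+ *	req.rx_pause = 1;
+ *	bna_mbox_qe_fill(&qe, &req, sizeof(req), my_cbfn, my_cbarg);
+ *	bna_mbox_send(bna, &qe);
+ */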
+
+#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
+
+#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
+ (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
+
+#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
+
+#define BNA_TO_POWER_OF_2(x) \
+do { \
+ int _shift = 0; \
+ while ((x) && (x) != 1) { \
+ (x) >>= 1; \
+ _shift++; \
+ } \
+ (x) <<= _shift; \
+} while (0)
+
+#define BNA_TO_POWER_OF_2_HIGH(x) \
+do { \
+ int n = 1; \
+ while (n < (x)) \
+ n <<= 1; \
+ (x) = n; \
+} while (0)
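+
+/*
+ * Example (illustrative only): BNA_TO_POWER_OF_2 rounds down to a
+ * power of two, BNA_TO_POWER_OF_2_HIGH rounds up:
+ *
+ *	int a = 100, b = 100;
+ *
+ *	BNA_TO_POWER_OF_2(a);		// a == 64
+ *	BNA_TO_POWER_OF_2_HIGH(b);	// b == 128
+ */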
+
+/*
+ * input : _addr -> OS DMA address in host endian format
+ * output : _bna_dma_addr -> pointer to h/w DMA address
+ */
+#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
+do { \
+ u64 tmp_addr = \
+ cpu_to_be64((u64)(_addr)); \
+ (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
+ (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
+} while (0)
+
+/*
+ * input : _bna_dma_addr -> pointer to h/w DMA address
+ * output : _addr -> OS DMA address in host endian format
+ */
+#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
+do { \
+ (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
+ | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
+} while (0)
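+
+/*
+ * Illustrative round trip (not part of the interface): the h/w keeps
+ * the address big-endian, so the two macros above are inverses:
+ *
+ *	struct bna_dma_addr hw_addr;
+ *	dma_addr_t pa, back;
+ *
+ *	BNA_SET_DMA_ADDR(pa, &hw_addr);
+ *	BNA_GET_DMA_ADDR(&hw_addr, back);	// back == pa
+ */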
+
+#define containing_rec(addr, type, field) \
+ ((type *)((unsigned char *)(addr) - \
+ (unsigned char *)(&((type *)0)->field)))
+
+#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
+
+/* TxQ element is 64 bytes */
+#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
+#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
+
+#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
+ (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
+}
+
+/* RxQ element is 8 bytes */
+#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
+#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
+
+#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
+}
+
+/* CQ element is 16 bytes */
+#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
+#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
+
+#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ \
+ page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
+}
+
+#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
+ (&((_cast *)(_q_base))[(_qe_idx)])
+
+#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
+
+#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
+ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+
+#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
+ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
+ (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
+ ((_q_depth) - 1))
+
+#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
+	(((_q_ptr)->producer_index - (_q_ptr)->consumer_index) & \
+	((_q_depth) - 1))
+
+#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
+
+#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
+
+#define BNA_Q_PI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.producer_index = \
+ (((_q_ptr)->q.producer_index + (_num)) & \
+ ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.consumer_index = \
+ (((_q_ptr)->q.consumer_index + (_num)) \
+ & ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_FREE_COUNT(_q_ptr) \
+ (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
+
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+ (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
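+
+/*
+ * Worked example (illustrative only): with q_depth 64, producer_index
+ * 10 and consumer_index 60 (the producer has wrapped), the masked
+ * arithmetic above still yields the right counts:
+ *
+ *	BNA_QE_IN_USE_CNT -> (10 - 60) & 63 == 14
+ *	BNA_QE_FREE_CNT   -> (60 - 10 - 1) & 63 == 49	(14 + 49 == 63)
+ */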
+
+/* These macros build the data portion of the TxQ/RxQ doorbell */
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_STOP (0x40000000)
+
+/* These macros build the data portion of the IB doorbell */
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
+
+/* Set the coalescing timer for the given IB */
+#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
+	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
+
+/* Ack 'events' number of events for a given IB */
+#define bna_ib_ack(_i_dbell, _events) \
+	(writel(((_i_dbell)->doorbell_ack | (_events)), \
+		(_i_dbell)->doorbell_addr))
+
+#define bna_txq_prod_indx_doorbell(_tcb) \
+	(writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+		(_tcb)->q_dbell))
+
+#define bna_rxq_prod_indx_doorbell(_rcb) \
+	(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+		(_rcb)->q_dbell))
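+
+/*
+ * Illustrative only (variable names are hypothetical): ringing a TxQ
+ * doorbell for producer index 17, then acking 5 events on an IB with
+ * a coalescing timeout of 2:
+ *
+ *	writel(BNA_DOORBELL_Q_PRD_IDX(17), tcb->q_dbell);
+ *	writel(BNA_DOORBELL_IB_INT_ACK(2, 5), i_dbell->doorbell_addr);
+ */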
+
+#define BNA_LARGE_PKT_SIZE 1000
+
+#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
+do { \
+ if ((_len) > BNA_LARGE_PKT_SIZE) { \
+ (_pkt)->large_pkt_cnt++; \
+ } else { \
+ (_pkt)->small_pkt_cnt++; \
+ } \
+} while (0)
+
+#define call_rxf_stop_cbfn(rxf, status) \
+do { \
+	if ((rxf)->stop_cbfn) { \
+		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
+		(rxf)->stop_cbfn = NULL; \
+		(rxf)->stop_cbarg = NULL; \
+	} \
+} while (0)
+
+#define call_rxf_start_cbfn(rxf, status) \
+do { \
+	if ((rxf)->start_cbfn) { \
+		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
+		(rxf)->start_cbfn = NULL; \
+		(rxf)->start_cbarg = NULL; \
+	} \
+} while (0)
+
+#define call_rxf_cam_fltr_cbfn(rxf, status) \
+do { \
+	if ((rxf)->cam_fltr_cbfn) { \
+		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, \
+			(rxf)->rx, (status)); \
+		(rxf)->cam_fltr_cbfn = NULL; \
+		(rxf)->cam_fltr_cbarg = NULL; \
+	} \
+} while (0)
+
+#define call_rxf_pause_cbfn(rxf, status) \
+do { \
+	if ((rxf)->oper_state_cbfn) { \
+		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, \
+			(rxf)->rx, (status)); \
+		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
+		(rxf)->oper_state_cbfn = NULL; \
+		(rxf)->oper_state_cbarg = NULL; \
+	} \
+} while (0)
+
+#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
+
+#define is_xxx_enable(mode, bitmask, xxx) \
+	(((bitmask) & (xxx)) && ((mode) & (xxx)))
+
+#define is_xxx_disable(mode, bitmask, xxx) \
+	(((bitmask) & (xxx)) && !((mode) & (xxx)))
+
+#define xxx_enable(mode, bitmask, xxx) \
+do { \
+	(bitmask) |= (xxx); \
+	(mode) |= (xxx); \
+} while (0)
+
+#define xxx_disable(mode, bitmask, xxx) \
+do { \
+	(bitmask) |= (xxx); \
+	(mode) &= ~(xxx); \
+} while (0)
+
+#define xxx_inactive(mode, bitmask, xxx) \
+do { \
+	(bitmask) &= ~(xxx); \
+	(mode) &= ~(xxx); \
+} while (0)
+
+#define is_promisc_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_promisc_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_default_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_default_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_allmulti_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define is_allmulti_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define GET_RXQS(rxp, q0, q1) do { \
+ switch ((rxp)->type) { \
+ case BNA_RXP_SINGLE: \
+ (q0) = rxp->rxq.single.only; \
+ (q1) = NULL; \
+ break; \
+ case BNA_RXP_SLR: \
+ (q0) = rxp->rxq.slr.large; \
+ (q1) = rxp->rxq.slr.small; \
+ break; \
+ case BNA_RXP_HDS: \
+ (q0) = rxp->rxq.hds.data; \
+ (q1) = rxp->rxq.hds.hdr; \
+ break; \
+ } \
+} while (0)
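+
+/*
+ * Illustrative only: GET_RXQS hides the per-type rx-path layout, so
+ * callers handle the one or two RxQs of a path uniformly:
+ *
+ *	struct bna_rxq *q0, *q1;
+ *
+ *	GET_RXQS(rxp, q0, q1);	// q1 is NULL for BNA_RXP_SINGLE
+ */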
+
+/**
+ *
+ * Function prototypes
+ *
+ */
+
+/**
+ * BNA
+ */
+
+/* Internal APIs */
+void bna_adv_res_req(struct bna_res_info *res_info);
+
+/* APIs for BNAD */
+void bna_res_req(struct bna_res_info *res_info);
+void bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info);
+void bna_uninit(struct bna *bna);
+void bna_stats_get(struct bna *bna);
+void bna_stats_clr(struct bna *bna);
+void bna_get_perm_mac(struct bna *bna, u8 *mac);
+
+/* APIs for Rx */
+int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
+
+/* APIs for RxF */
+struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
+void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
+ struct bna_mac *mac);
+struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
+void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mac *mac);
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
+void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg);
+
+/**
+ * DEVICE
+ */
+
+/* Internal APIs */
+void bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info);
+
+/* APIs for BNA */
+void bna_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_device_uninit(struct bna_device *device);
+void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+int bna_device_status_get(struct bna_device *device);
+int bna_device_state_get(struct bna_device *device);
+
+/* APIs for BNAD */
+void bna_device_enable(struct bna_device *device);
+void bna_device_disable(struct bna_device *device,
+ enum bna_cleanup_type type);
+
+/**
+ * MBOX
+ */
+
+/* APIs for DEVICE */
+void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
+void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
+void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
+void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
+
+/* APIs for PORT, TX, RX */
+void bna_mbox_handler(struct bna *bna, u32 intr_status);
+void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+
+/**
+ * PORT
+ */
+
+/* APIs for BNA */
+void bna_port_init(struct bna_port *port, struct bna *bna);
+void bna_port_uninit(struct bna_port *port);
+int bna_port_state_get(struct bna_port *port);
+int bna_llport_state_get(struct bna_llport *llport);
+
+/* APIs for DEVICE */
+void bna_port_start(struct bna_port *port);
+void bna_port_stop(struct bna_port *port);
+void bna_port_fail(struct bna_port *port);
+
+/* API for RX */
+int bna_port_mtu_get(struct bna_port *port);
+void bna_llport_admin_up(struct bna_llport *llport);
+void bna_llport_admin_down(struct bna_llport *llport);
+
+/* API for BNAD */
+void bna_port_enable(struct bna_port *port);
+void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status));
+void bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mac_get(struct bna_port *port, mac_t *mac);
+void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
+void bna_port_linkcbfn_set(struct bna_port *port,
+ void (*linkcbfn)(struct bnad *,
+ enum bna_link_status));
+void bna_port_admin_up(struct bna_port *port);
+void bna_port_admin_down(struct bna_port *port);
+
+/* Callbacks for TX, RX */
+void bna_port_cb_tx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+void bna_port_cb_rx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+
+/* Callbacks for MBOX */
+void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+ int status);
+void bna_port_cb_link_down(struct bna_port *port, int status);
+
+/**
+ * IB
+ */
+
+/* APIs for BNA */
+void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
+
+/* APIs for TX, RX */
+struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
+ enum bna_intr_type intr_type, int vector);
+void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
+int bna_ib_reserve_idx(struct bna_ib *ib);
+void bna_ib_release_idx(struct bna_ib *ib, int idx);
+int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
+void bna_ib_start(struct bna_ib *ib);
+void bna_ib_stop(struct bna_ib *ib);
+void bna_ib_fail(struct bna_ib *ib);
+void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
+
+/**
+ * TX MODULE AND TX
+ */
+
+/* Internal APIs */
+void bna_tx_prio_changed(struct bna_tx *tx, int prio);
+
+/* APIs for BNA */
+void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
+int bna_tx_state_get(struct bna_tx *tx);
+
+/* APIs for PORT */
+void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
+void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
+void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
+
+/* APIs for BNAD */
+void bna_tx_res_req(int num_txq, int txq_depth,
+ struct bna_res_info *res_info);
+struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_tx_destroy(struct bna_tx *tx);
+void bna_tx_enable(struct bna_tx *tx);
+void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_tx_prio_set(struct bna_tx *tx, int prio,
+ void (*cbfn)(struct bnad *, struct bna_tx *,
+ enum bna_cb_status));
+void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+
+/**
+ * RX MODULE, RX, RXF
+ */
+
+/* Internal APIs */
+void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
+void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr);
+void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
+void bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config);
+int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_process_packet_filter_default(struct bna_rxf *rxf);
+int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
+
+/* APIs for BNA */
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
+int bna_rx_state_get(struct bna_rx *rx);
+int bna_rxf_state_get(struct bna_rxf *rxf);
+
+/* APIs for PORT */
+void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
+
+/* APIs for BNAD */
+void bna_rx_res_req(struct bna_rx_config *rx_config,
+ struct bna_res_info *res_info);
+struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_rx_destroy(struct bna_rx *rx);
+void bna_rx_enable(struct bna_rx *rx);
+void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_update(struct bna_ccb *ccb);
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlanfilter_disable(struct bna_rx *rx);
+void bna_rx_rss_enable(struct bna_rx *rx);
+void bna_rx_rss_disable(struct bna_rx *rx);
+void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
+void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
+ int nvectors);
+void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_hds_disable(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_receive_pause(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_receive_resume(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+
+/* RxF APIs for RX */
+void bna_rxf_start(struct bna_rxf *rxf);
+void bna_rxf_stop(struct bna_rxf *rxf);
+void bna_rxf_fail(struct bna_rxf *rxf);
+void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
+ struct bna_rx_config *q_config);
+void bna_rxf_uninit(struct bna_rxf *rxf);
+
+/* Callback from RXF to RX */
+void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status);
+void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status);
+
+/**
+ * BNAD
+ */
+
+/* Callbacks for BNA */
+void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats);
+void bnad_cb_stats_clr(struct bnad *bnad);
+
+/* Callbacks for DEVICE */
+void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
+void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
+
+/* Callbacks for port */
+void bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status status);
+
+#endif /* __BNA_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfa_wc.h"
+
+/**
+ * MBOX
+ */
+static int
+bna_is_aen(u8 msg_id)
+{
+ return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
+ msg_id == BFI_LL_I2H_LINK_UP_AEN);
+}
+
+static void
+bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
+{
+ struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
+
+ switch (aen->mh.msg_id) {
+ case BFI_LL_I2H_LINK_UP_AEN:
+ bna_port_cb_link_up(&bna->port, aen, aen->reason);
+ break;
+ case BFI_LL_I2H_LINK_DOWN_AEN:
+ bna_port_cb_link_down(&bna->port, aen->reason);
+ break;
+ default:
+ break;
+ }
+}
+
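+/**
+ * Mailbox ISR for the link-layer message class. Link AENs are routed
+ * to the port; any other message is treated as the response to the
+ * command at the head of posted_q, matched by message id and i2h
+ * token, and the next queued command, if any, is then posted to the
+ * IOC.
+ */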
+static void
+bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
+{
+ struct bna *bna = (struct bna *)(llarg);
+ struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
+ struct bfi_mhdr *cmd_h, *rsp_h;
+ struct bna_mbox_qe *mb_qe = NULL;
+ int to_post = 0;
+	u8 aen = 0;
+
+ aen = bna_is_aen(mb_rsp->mh.msg_id);
+
+ if (!aen) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
+ rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
+
+ if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
+ (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
+ /* Remove the request from posted_q, update state */
+ list_del(&mb_qe->qe);
+ bna->mbox_mod.msg_pending--;
+ if (list_empty(&bna->mbox_mod.posted_q))
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+ else
+ to_post = 1;
+
+ /* Dispatch the cbfn */
+ if (mb_qe->cbfn)
+ mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
+
+ /* Post the next entry, if needed */
+ if (to_post) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ bfa_ioc_mbox_queue(&bna->device.ioc,
+ &mb_qe->cmd);
+ }
+		} else {
+			pr_info("No matching rsp for [%d:%d:%d]\n",
+				mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
+				mb_rsp->mh.mtag.i2htok);
+		}
+	} else {
+		bna_mbox_aen_callback(bna, msg);
+	}
+}
+
+void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ u32 init_halt;
+
+ if (intr_status & __HALT_STATUS_BITS) {
+ init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
+ init_halt &= ~__FW_INIT_HALT_P;
+ writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
+ }
+
+ bfa_ioc_error_isr(&bna->device.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(intr_status))
+ bfa_ioc_mbox_isr(&bna->device.ioc);
+}
+
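+/*
+ * Queue a mailbox command. Only one command is outstanding at a time:
+ * if the mailbox is free the command is handed to the IOC right away,
+ * otherwise it waits on posted_q and bna_ll_isr() posts it once the
+ * response to the current command arrives.
+ */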
+void
+bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
+{
+ struct bfi_mhdr *mh;
+
+ mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
+
+ mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
+ bna->mbox_mod.msg_ctr++;
+ bna->mbox_mod.msg_pending++;
+	list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
+		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+		bna->mbox_mod.state = BNA_MBOX_POSTED;
+	}
+}
+
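+/*
+ * Complete every queued command with BNA_CB_NOT_EXEC. Called when the
+ * mailbox module stops (including the device-failure path), which is
+ * what produces the "flush" FWRESP_* events mentioned in the state
+ * machine handlers below.
+ */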
+void
+bna_mbox_flush_q(struct bna *bna, struct list_head *q)
+{
+ struct bna_mbox_qe *mb_qe = NULL;
+ struct list_head *mb_q;
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+
+ mb_q = &bna->mbox_mod.posted_q;
+
+ while (!list_empty(mb_q)) {
+ bfa_q_deq(mb_q, &mb_qe);
+ cbfn = mb_qe->cbfn;
+ cbarg = mb_qe->cbarg;
+ bfa_q_qe_init(mb_qe);
+ bna->mbox_mod.msg_pending--;
+
+ if (cbfn)
+ cbfn(cbarg, BNA_CB_NOT_EXEC);
+ }
+
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+}
+
+void
+bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
+{
+}
+
+void
+bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
+{
+ bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
+}
+
+void
+bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+{
+ bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+ mbox_mod->state = BNA_MBOX_FREE;
+ mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
+ INIT_LIST_HEAD(&mbox_mod->posted_q);
+ mbox_mod->bna = bna;
+}
+
+void
+bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
+{
+ mbox_mod->bna = NULL;
+}
+
+/**
+ * LLPORT
+ */
+#define call_llport_stop_cbfn(llport, status)\
+do {\
+ if ((llport)->stop_cbfn)\
+ (llport)->stop_cbfn(&(llport)->bna->port, status);\
+ (llport)->stop_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_llport_up(struct bna_llport *llport);
+static void bna_fw_cb_llport_up(void *arg, int status);
+static void bna_fw_llport_down(struct bna_llport *llport);
+static void bna_fw_cb_llport_down(void *arg, int status);
+static void bna_llport_start(struct bna_llport *llport);
+static void bna_llport_stop(struct bna_llport *llport);
+static void bna_llport_fail(struct bna_llport *llport);
+
+enum bna_llport_event {
+ LLPORT_E_START = 1,
+ LLPORT_E_STOP = 2,
+ LLPORT_E_FAIL = 3,
+ LLPORT_E_UP = 4,
+ LLPORT_E_DOWN = 5,
+ LLPORT_E_FWRESP_UP = 6,
+ LLPORT_E_FWRESP_DOWN = 7
+};
+
+enum bna_llport_state {
+ BNA_LLPORT_STOPPED = 1,
+ BNA_LLPORT_DOWN = 2,
+ BNA_LLPORT_UP_RESP_WAIT = 3,
+ BNA_LLPORT_DOWN_RESP_WAIT = 4,
+ BNA_LLPORT_UP = 5,
+ BNA_LLPORT_LAST_RESP_WAIT = 6
+};
+
+bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+
+static struct bfa_sm_table llport_sm_table[] = {
+ {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
+ {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
+ {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
+ {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
+};
+
+static void
+bna_llport_sm_stopped_entry(struct bna_llport *llport)
+{
+	llport->bna->port.link_cbfn(llport->bna->bnad, BNA_LINK_DOWN);
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+}
+
+static void
+bna_llport_sm_stopped(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_START:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ case LLPORT_E_STOP:
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+ break;
+
+ case LLPORT_E_FAIL:
+ break;
+
+ case LLPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_DOWN:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_entry(struct bna_llport *llport)
+{
+	bnad_cb_port_link_status(llport->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_llport_sm_down(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
+{
+	/**
+	 * NOTE: Do not call bna_fw_llport_up() here. That would post a
+	 * second mailbox command on the down_resp_wait -> up_resp_wait
+	 * transition on event LLPORT_E_UP
+	 */
+}
+
+static void
+bna_llport_sm_up_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
+{
+	/**
+	 * NOTE: Do not call bna_fw_llport_down() here. That would post a
+	 * second mailbox command on the up_resp_wait -> down_resp_wait
+	 * transition on event LLPORT_E_DOWN
+	 */
+}
+
+static void
+bna_llport_sm_down_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+		/* up_resp_wait -> down_resp_wait transition on LLPORT_E_DOWN */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_up(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_last_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to llport
+ */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+		/* up_resp_wait -> last_resp_wait transition on LLPORT_E_STOP */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_fw_llport_admin_up(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_ENABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_up, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_up(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_up(llport);
+}
+
+static void
+bna_fw_cb_llport_up(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
+}
+
+static void
+bna_fw_llport_admin_down(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_DISABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_down, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_down(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_down(llport);
+}
+
+static void
+bna_fw_cb_llport_down(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
+}
+
+static void
+bna_port_cb_llport_stopped(struct bna_port *port,
+ enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+static void
+bna_llport_init(struct bna_llport *llport, struct bna *bna)
+{
+ llport->flags |= BNA_LLPORT_F_ENABLED;
+ llport->type = BNA_PORT_T_REGULAR;
+ llport->bna = bna;
+
+ llport->link_status = BNA_LINK_DOWN;
+
+ llport->admin_up_count = 0;
+
+ llport->stop_cbfn = NULL;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+}
+
+static void
+bna_llport_uninit(struct bna_llport *llport)
+{
+ llport->flags &= ~BNA_LLPORT_F_ENABLED;
+
+ llport->bna = NULL;
+}
+
+static void
+bna_llport_start(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_START);
+}
+
+static void
+bna_llport_stop(struct bna_llport *llport)
+{
+ llport->stop_cbfn = bna_port_cb_llport_stopped;
+
+ bfa_fsm_send_event(llport, LLPORT_E_STOP);
+}
+
+static void
+bna_llport_fail(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_FAIL);
+}
+
+int
+bna_llport_state_get(struct bna_llport *llport)
+{
+ return bfa_sm_to_state(llport_sm_table, llport->fsm);
+}
+
+void
+bna_llport_admin_up(struct bna_llport *llport)
+{
+ llport->admin_up_count++;
+
+ if (llport->admin_up_count == 1) {
+ llport->flags |= BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+ }
+}
+
+void
+bna_llport_admin_down(struct bna_llport *llport)
+{
+ llport->admin_up_count--;
+
+ if (llport->admin_up_count == 0) {
+ llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+ }
+}
+
+/**
+ * PORT
+ */
+#define bna_port_chld_start(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_llport_start(&(port)->llport);\
+ bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_chld_stop(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_llport_stop(&(port)->llport);\
+ bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
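+
+/*
+ * The three bfa_wc_up() calls in bna_port_chld_stop() account for the
+ * llport, Tx-module and Rx-module stop completions;
+ * bna_port_cb_llport_stopped(), bna_port_cb_tx_stopped() and
+ * bna_port_cb_rx_stopped() each do the matching bfa_wc_down(), and
+ * bna_port_cb_chld_stopped() raises PORT_E_CHLD_STOPPED once the
+ * count drains to zero.
+ */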
+
+#define bna_port_chld_fail(port)\
+do {\
+ bna_llport_fail(&(port)->llport);\
+ bna_tx_mod_fail(&(port)->bna->tx_mod);\
+ bna_rx_mod_fail(&(port)->bna->rx_mod);\
+} while (0)
+
+#define bna_port_rx_start(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_rx_stop(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define call_port_stop_cbfn(port, status)\
+do {\
+ if ((port)->stop_cbfn)\
+ (port)->stop_cbfn((port)->stop_cbarg, status);\
+ (port)->stop_cbfn = NULL;\
+ (port)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_port_pause_cbfn(port, status)\
+do {\
+ if ((port)->pause_cbfn)\
+ (port)->pause_cbfn((port)->bna->bnad, status);\
+ (port)->pause_cbfn = NULL;\
+} while (0)
+
+#define call_port_mtu_cbfn(port, status)\
+do {\
+ if ((port)->mtu_cbfn)\
+ (port)->mtu_cbfn((port)->bna->bnad, status);\
+ (port)->mtu_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_pause_set(struct bna_port *port);
+static void bna_fw_cb_pause_set(void *arg, int status);
+static void bna_fw_mtu_set(struct bna_port *port);
+static void bna_fw_cb_mtu_set(void *arg, int status);
+
+enum bna_port_event {
+ PORT_E_START = 1,
+ PORT_E_STOP = 2,
+ PORT_E_FAIL = 3,
+ PORT_E_PAUSE_CFG = 4,
+ PORT_E_MTU_CFG = 5,
+ PORT_E_CHLD_STOPPED = 6,
+ PORT_E_FWRESP_PAUSE = 7,
+ PORT_E_FWRESP_MTU = 8
+};
+
+enum bna_port_state {
+ BNA_PORT_STOPPED = 1,
+ BNA_PORT_MTU_INIT_WAIT = 2,
+ BNA_PORT_PAUSE_INIT_WAIT = 3,
+ BNA_PORT_LAST_RESP_WAIT = 4,
+ BNA_PORT_STARTED = 5,
+ BNA_PORT_PAUSE_CFG_WAIT = 6,
+ BNA_PORT_RX_STOP_WAIT = 7,
+ BNA_PORT_MTU_CFG_WAIT = 8,
+ BNA_PORT_CHLD_STOP_WAIT = 9
+};
+
+bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, started, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
+ enum bna_port_event);
+
+static struct bfa_sm_table port_sm_table[] = {
+ {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
+ {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
+ {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
+ {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
+ {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
+ {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
+ {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
+ {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
+ {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
+};
+
+static void
+bna_port_sm_stopped_entry(struct bna_port *port)
+{
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_START:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ break;
+
+ case PORT_E_STOP:
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_MTU_CFG:
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ /**
+ * This event is received due to LLPort, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ /* No-op */
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bna_fw_mtu_set(port);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_init_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ port->flags |= BNA_PORT_F_PAUSE_CHANGED;
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
+ port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
+ bna_fw_pause_set(port);
+ } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_chld_start(port);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_last_resp_wait_entry(struct bna_port *port)
+{
+}
+
+static void
+bna_port_sm_last_resp_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_started_entry(struct bna_port *port)
+{
+ /**
+ * NOTE: Do not call bna_port_chld_start() here, since it will be
+ * inadvertently called during pause_cfg_wait->started transition
+ * as well
+ */
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_started(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
+ break;
+
+ case PORT_E_MTU_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_cfg_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_rx_stop(port);
+}
+
+static void
+bna_port_sm_rx_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_rx_start(port);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_chld_stop(port);
+}
+
+static void
+bna_port_sm_chld_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_fw_pause_set(struct bna_port *port)
+{
+ struct bfi_ll_set_pause_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.tx_pause = port->pause_config.tx_pause;
+ ll_req.rx_pause = port->pause_config.rx_pause;
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_pause_set, port);
+
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_pause_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
+}
+
+static void
+bna_fw_mtu_set(struct bna_port *port)
+{
+ struct bfi_ll_mtu_info_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
+ ll_req.mtu = htons((u16)port->mtu);
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_mtu_set, port);
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_mtu_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
+}
+
+static void
+bna_port_cb_chld_stopped(void *arg)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
+}
+
+void
+bna_port_init(struct bna_port *port, struct bna *bna)
+{
+ port->bna = bna;
+ port->flags = 0;
+ port->mtu = 0;
+ port->type = BNA_PORT_T_REGULAR;
+
+ port->link_cbfn = bnad_cb_port_link_status;
+
+ port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
+ port->chld_stop_wc.wc_cbarg = port;
+ port->chld_stop_wc.wc_count = 0;
+
+ port->stop_cbfn = NULL;
+ port->stop_cbarg = NULL;
+
+ port->pause_cbfn = NULL;
+
+ port->mtu_cbfn = NULL;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+
+ bna_llport_init(&port->llport, bna);
+}
+
+void
+bna_port_uninit(struct bna_port *port)
+{
+ bna_llport_uninit(&port->llport);
+
+ port->flags = 0;
+
+ port->bna = NULL;
+}
+
+int
+bna_port_state_get(struct bna_port *port)
+{
+ return bfa_sm_to_state(port_sm_table, port->fsm);
+}
+
+void
+bna_port_start(struct bna_port *port)
+{
+ port->flags |= BNA_PORT_F_DEVICE_READY;
+ if (port->flags & BNA_PORT_F_ENABLED)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+void
+bna_port_stop(struct bna_port *port)
+{
+ port->stop_cbfn = bna_device_cb_port_stopped;
+ port->stop_cbarg = &port->bna->device;
+
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+void
+bna_port_fail(struct bna_port *port)
+{
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_FAIL);
+}
+
+void
+bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+void
+bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+ int status)
+{
+ int i;
+ u8 prio_map;
+
+ port->llport.link_status = BNA_LINK_UP;
+ if (aen->cee_linkup)
+ port->llport.link_status = BNA_CEE_UP;
+
+	/* Priority is the lowest set bit of the CEE priority map */
+	prio_map = aen->prio_map;
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		port->priority = i;
+	} else {
+		port->priority = 0;
+	}
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+ bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+ port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+ port->llport.link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+ port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
+int
+bna_port_mtu_get(struct bna_port *port)
+{
+ return port->mtu;
+}
+
+void
+bna_port_enable(struct bna_port *port)
+{
+ if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
+ return;
+
+ port->flags |= BNA_PORT_F_ENABLED;
+
+ if (port->flags & BNA_PORT_F_DEVICE_READY)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+void
+bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ port->stop_cbfn = cbfn;
+ port->stop_cbarg = port->bna->bnad;
+
+ port->flags &= ~BNA_PORT_F_ENABLED;
+
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+void
+bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->pause_config = *pause_config;
+
+ port->pause_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
+}
+
+void
+bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->mtu = mtu;
+
+ port->mtu_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_MTU_CFG);
+}
+
+void
+bna_port_mac_get(struct bna_port *port, mac_t *mac)
+{
+ *mac = bfa_ioc_get_mac(&port->bna->device.ioc);
+}
+
+/**
+ * Should be called only when port is disabled
+ */
+void
+bna_port_type_set(struct bna_port *port, enum bna_port_type type)
+{
+ port->type = type;
+ port->llport.type = type;
+}
+
+/**
+ * Should be called only when port is disabled
+ */
+void
+bna_port_linkcbfn_set(struct bna_port *port,
+ void (*linkcbfn)(struct bnad *, enum bna_link_status))
+{
+ port->link_cbfn = linkcbfn;
+}
+
+void
+bna_port_admin_up(struct bna_port *port)
+{
+ struct bna_llport *llport = &port->llport;
+
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ return;
+
+ llport->flags |= BNA_LLPORT_F_ENABLED;
+
+ if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+}
+
+void
+bna_port_admin_down(struct bna_port *port)
+{
+ struct bna_llport *llport = &port->llport;
+
+ if (!(llport->flags & BNA_LLPORT_F_ENABLED))
+ return;
+
+ llport->flags &= ~BNA_LLPORT_F_ENABLED;
+
+ if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+}
+
+/**
+ * DEVICE
+ */
+#define enable_mbox_intr(_device)\
+do {\
+ u32 intr_status;\
+ bna_intr_status_get((_device)->bna, intr_status);\
+ bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
+ bna_mbox_intr_enable((_device)->bna);\
+} while (0)
+
+#define disable_mbox_intr(_device)\
+do {\
+ bna_mbox_intr_disable((_device)->bna);\
+ bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
+} while (0)
+
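+/*
+ * Per-PCI-function interrupt register offsets, presumably indexed by
+ * the PCI function number (the consumers of this table are outside
+ * this hunk).
+ */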
+const struct bna_chip_regs_offset reg_offset[] = {
+	{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
+		HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
+	{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
+		HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
+	{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
+		HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
+	{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
+		HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
+};
+
+enum bna_device_event {
+ DEVICE_E_ENABLE = 1,
+ DEVICE_E_DISABLE = 2,
+ DEVICE_E_IOC_READY = 3,
+ DEVICE_E_IOC_FAILED = 4,
+ DEVICE_E_IOC_DISABLED = 5,
+ DEVICE_E_IOC_RESET = 6,
+ DEVICE_E_PORT_STOPPED = 7,
+};
+
+enum bna_device_state {
+ BNA_DEVICE_STOPPED = 1,
+ BNA_DEVICE_IOC_READY_WAIT = 2,
+ BNA_DEVICE_READY = 3,
+ BNA_DEVICE_PORT_STOP_WAIT = 4,
+ BNA_DEVICE_IOC_DISABLE_WAIT = 5,
+ BNA_DEVICE_FAILED = 6
+};
+
+bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ready, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, failed, struct bna_device,
+ enum bna_device_event);
+
+static struct bfa_sm_table device_sm_table[] = {
+ {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
+ {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
+ {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
+ {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
+ {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
+ {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
+};
+
+static void
+bna_device_sm_stopped_entry(struct bna_device *device)
+{
+ if (device->stop_cbfn)
+ device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
+
+ device->stop_cbfn = NULL;
+ device->stop_cbarg = NULL;
+}
+
+static void
+bna_device_sm_stopped(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_ENABLE:
+ if (device->intr_type == BNA_INTR_T_MSIX)
+ bna_mbox_msix_idx_set(device);
+ bfa_ioc_enable(&device->ioc);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
+{
+	/**
+	 * Do not call bfa_ioc_enable() here. Because of the
+	 * failed -> ioc_ready_wait transition, the enable must be
+	 * issued from the previous state instead.
+	 */
+}
+
+static void
+bna_device_sm_ioc_ready_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_INTERRUPT);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_READY:
+ bfa_fsm_set_state(device, bna_device_sm_ready);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ready_entry(struct bna_device *device)
+{
+ bna_mbox_mod_start(&device->bna->mbox_mod);
+ bna_port_start(&device->bna->port);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_SUCCESS);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_port_stop_wait_entry(struct bna_device *device)
+{
+ bna_port_stop(&device->bna->port);
+}
+
+static void
+bna_device_sm_port_stop_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_PORT_STOPPED:
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
+{
+ bfa_ioc_disable(&device->ioc);
+}
+
+static void
+bna_device_sm_ioc_disable_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_IOC_DISABLED:
+ disable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_failed_entry(struct bna_device *device)
+{
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_FAIL);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_failed(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+/* IOC callback functions */
+
+static void
+bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ if (error)
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+ else
+ bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
+}
+
+static void
+bna_device_cb_iocll_disabled(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
+}
+
+static void
+bna_device_cb_iocll_failed(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+}
+
+static void
+bna_device_cb_iocll_reset(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
+}
+
+static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
+ bna_device_cb_iocll_ready,
+ bna_device_cb_iocll_disabled,
+ bna_device_cb_iocll_failed,
+ bna_device_cb_iocll_reset
+};
+
+void
+bna_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u64 dma;
+
+ device->bna = bna;
+
+ /**
+ * Attach IOC and claim:
+ * 1. DMA memory for IOC attributes
+ * 2. Kernel memory for FW trace
+ */
+ bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+ bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+ bfa_ioc_mem_claim(&device->ioc,
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
+ dma);
+
+ bna_adv_device_init(device, bna, res_info);
+ /*
+ * Initialize mbox_mod only after IOC, so that mbox handler
+ * registration goes through
+ */
+ device->intr_type =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
+ device->vector =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
+ bna_mbox_mod_init(&bna->mbox_mod, bna);
+
+ device->ready_cbfn = device->stop_cbfn = NULL;
+ device->ready_cbarg = device->stop_cbarg = NULL;
+
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+}
+
+void
+bna_device_uninit(struct bna_device *device)
+{
+ bna_mbox_mod_uninit(&device->bna->mbox_mod);
+
+ bfa_cee_detach(&device->bna->cee);
+
+ bfa_ioc_detach(&device->ioc);
+
+ device->bna = NULL;
+}
+
+void
+bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+{
+ struct bna_device *device = (struct bna_device *)arg;
+
+ bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
+}
+
+int
+bna_device_status_get(struct bna_device *device)
+{
+ return (device->fsm == (bfa_fsm_t)bna_device_sm_ready);
+}
+
+void
+bna_device_enable(struct bna_device *device)
+{
+ if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
+ bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
+ return;
+ }
+
+ device->ready_cbfn = bnad_cb_device_enabled;
+ device->ready_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_ENABLE);
+}
+
+void
+bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ device->stop_cbfn = bnad_cb_device_disabled;
+ device->stop_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_DISABLE);
+}
+
+int
+bna_device_state_get(struct bna_device *device)
+{
+ return bfa_sm_to_state(device_sm_table, device->fsm);
+}
+
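+/*
+ * Dynamic interrupt-moderation tables, indexed by [load][bias]; the
+ * entries are presumably the coalescing timeouts applied through
+ * bna_rx_dim_reconfig()/bna_rx_dim_update() (an assumption; those
+ * consumers live outside this hunk).
+ */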
+u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 20},
+ {10, 18},
+ {8, 16},
+ {6, 12},
+ {4, 8},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/* device */
+void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u8 *kva;
+ u64 dma;
+
+ device->bna = bna;
+
+ kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+ /**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+ bfa_cee_attach(&bna->cee, &device->ioc, bna);
+ bfa_cee_mem_claim(&bna->cee, kva, dma);
+ kva += bfa_cee_meminfo();
+ dma += bfa_cee_meminfo();
+
+}
+
+/* utils */
+
+void
+bna_adv_res_req(struct bna_res_info *res_info)
+{
+ /* DMA memory for COMMON_MODULE */
+ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+ bfa_cee_meminfo(), PAGE_SIZE);
+
+	/* Virtual memory for retrieving fw_trc */
+ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+
+	/* DMA memory for retrieving stats */
+ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
+ ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
+
+ /* Virtual memory for soft stats */
+ res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
+ sizeof(struct bna_sw_stats);
+}
+
+static void
+bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
+{
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+ struct list_head *txq_qe;
+ struct list_head *rxp_qe;
+ struct list_head *mac_qe;
+ int i;
+
+ sw_stats->device_state = bna_device_state_get(&bna->device);
+ sw_stats->port_state = bna_port_state_get(&bna->port);
+ sw_stats->port_flags = bna->port.flags;
+ sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
+ sw_stats->priority = bna->port.priority;
+
+ i = 0;
+ list_for_each(qe, &bna->tx_mod.tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
+ sw_stats->tx_stats[i].tx_flags = tx->flags;
+
+ sw_stats->tx_stats[i].num_txqs = 0;
+ sw_stats->tx_stats[i].txq_bmap[0] = 0;
+ sw_stats->tx_stats[i].txq_bmap[1] = 0;
+ list_for_each(txq_qe, &tx->txq_q) {
+ txq = (struct bna_txq *)txq_qe;
+ if (txq->txq_id < 32)
+ sw_stats->tx_stats[i].txq_bmap[0] |=
+ ((u32)1 << txq->txq_id);
+ else
+				sw_stats->tx_stats[i].txq_bmap[1] |=
+					((u32)1 << (txq->txq_id - 32));
+ sw_stats->tx_stats[i].num_txqs++;
+ }
+
+ sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
+
+ i++;
+ }
+ sw_stats->num_active_tx = i;
+
+ i = 0;
+ list_for_each(qe, &bna->rx_mod.rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
+ sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
+
+ sw_stats->rx_stats[i].num_rxps = 0;
+ sw_stats->rx_stats[i].num_rxqs = 0;
+ sw_stats->rx_stats[i].rxq_bmap[0] = 0;
+ sw_stats->rx_stats[i].rxq_bmap[1] = 0;
+ sw_stats->rx_stats[i].cq_bmap[0] = 0;
+ sw_stats->rx_stats[i].cq_bmap[1] = 0;
+ list_for_each(rxp_qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)rxp_qe;
+
+ sw_stats->rx_stats[i].num_rxqs += 1;
+
+ if (rxp->type == BNA_RXP_SINGLE) {
+ if (rxp->rxq.single.only->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.single.only->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.single.only->rxq_id - 32));
+ }
+ } else {
+ if (rxp->rxq.slr.large->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.large->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.large->rxq_id - 32));
+ }
+
+ if (rxp->rxq.slr.small->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.small->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.small->rxq_id - 32));
+ }
+ sw_stats->rx_stats[i].num_rxqs += 1;
+ }
+
+ if (rxp->cq.cq_id < 32)
+ sw_stats->rx_stats[i].cq_bmap[0] |=
+ (1 << rxp->cq.cq_id);
+ else
+ sw_stats->rx_stats[i].cq_bmap[1] |=
+ (1 << (rxp->cq.cq_id - 32));
+
+ sw_stats->rx_stats[i].num_rxps++;
+ }
+
+ sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
+ sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
+ sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
+
+ sw_stats->rx_stats[i].num_active_ucast = 0;
+ if (rx->rxf.ucast_active_mac)
+ sw_stats->rx_stats[i].num_active_ucast++;
+ list_for_each(mac_qe, &rx->rxf.ucast_active_q)
+ sw_stats->rx_stats[i].num_active_ucast++;
+
+ sw_stats->rx_stats[i].num_active_mcast = 0;
+ list_for_each(mac_qe, &rx->rxf.mcast_active_q)
+ sw_stats->rx_stats[i].num_active_mcast++;
+
+ sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
+ sw_stats->rx_stats[i].vlan_filter_status =
+ rx->rxf.vlan_filter_status;
+ memcpy(sw_stats->rx_stats[i].vlan_filter_table,
+ rx->rxf.vlan_filter_table,
+ sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
+
+ sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
+ sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
+
+ i++;
+ }
+ sw_stats->num_active_rx = i;
+}
+
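+/*
+ * The firmware DMAs its stats as one packed block: the fixed part of
+ * struct bfi_ll_stats followed, starting at rxf_stats[0], by one
+ * bfi_ll_stats_rxf/_txf record per function set in the request
+ * bitmaps. The callback below endian-converts the whole block, then
+ * walks the packed records from the end backwards (txf first, then
+ * rxf), spreading them into the fixed per-function txf_stats[] and
+ * rxf_stats[] slots without clobbering records not yet copied.
+ */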
+static void
+bna_fw_cb_stats_get(void *arg, int status)
+{
+ struct bna *bna = (struct bna *)arg;
+ u64 *p_stats;
+ int i, count;
+ int rxf_count, txf_count;
+ u64 rxf_bmap, txf_bmap;
+
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+
+ if (status == 0) {
+ p_stats = (u64 *)bna->stats.hw_stats;
+ count = sizeof(struct bfi_ll_stats) / sizeof(u64);
+ for (i = 0; i < count; i++)
+ p_stats[i] = cpu_to_be64(p_stats[i]);
+
+ rxf_count = 0;
+ rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
+ ((u64)bna->stats.rxf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
+ if (rxf_bmap & ((u64)1 << i))
+ rxf_count++;
+
+ txf_count = 0;
+ txf_bmap = (u64)bna->stats.txf_bmap[0] |
+ ((u64)bna->stats.txf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
+ if (txf_bmap & ((u64)1 << i))
+ txf_count++;
+
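+ /*
+ * Firmware DMAs a compacted stats block: the fixed LL stats
+ * area is followed by records only for the RxFs and TxFs
+ * active in the request. Position p_stats past the compacted
+ * region, then walk backwards below to scatter each record
+ * into its fixed per-id slot.
+ */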
+ p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
+ ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
+ txf_count * sizeof(struct bfi_ll_stats_txf))/
+ sizeof(u64));
+
+ /* Populate the TXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
+ if (txf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_txf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->txf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_txf));
+ }
+
+ /* Populate the RXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
+ if (rxf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_rxf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->rxf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_rxf));
+ }
+
+ bna_sw_stats_get(bna, bna->stats.sw_stats);
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+ } else {
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+ }
+}
+
+static void
+bna_fw_stats_get(struct bna *bna)
+{
+ struct bfi_ll_stats_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
+ ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+
+ ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
+ ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
+ ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
+
+ ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
+ ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
+
+ bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_stats_get, bna);
+ bna_mbox_send(bna, &bna->mbox_qe);
+
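+ /*
+ * Snapshot the request bitmaps so that the completion handler
+ * unpacks the DMAed stats against the same RxF/TxF set that
+ * was sent to firmware.
+ */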
+ bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
+ bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
+ bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
+ bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
+}
+
+static void
+bna_fw_cb_stats_clr(void *arg, int status)
+{
+ struct bna *bna = (struct bna *)arg;
+
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+
+ memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
+ memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
+
+ bnad_cb_stats_clr(bna->bnad);
+}
+
+static void
+bna_fw_stats_clr(struct bna *bna)
+{
+ struct bfi_ll_stats_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+ ll_req.rxf_id_mask[0] = htonl(0xffffffff);
+ ll_req.rxf_id_mask[1] = htonl(0xffffffff);
+ ll_req.txf_id_mask[0] = htonl(0xffffffff);
+ ll_req.txf_id_mask[1] = htonl(0xffffffff);
+
+ bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_stats_clr, bna);
+ bna_mbox_send(bna, &bna->mbox_qe);
+}
+
+void
+bna_stats_get(struct bna *bna)
+{
+ if (bna_device_status_get(&bna->device))
+ bna_fw_stats_get(bna);
+ else
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+}
+
+void
+bna_stats_clr(struct bna *bna)
+{
+ if (bna_device_status_get(&bna->device)) {
+ bna_fw_stats_clr(bna);
+ } else {
+ memset(bna->stats.sw_stats, 0,
+ sizeof(struct bna_sw_stats));
+ memset(bna->stats.hw_stats, 0,
+ sizeof(struct bfi_ll_stats));
+ bnad_cb_stats_clr(bna->bnad);
+ }
+}
+
+/* IB */
+void
+bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+{
+ ib->ib_config.coalescing_timeo = coalescing_timeo;
+
+ if (ib->start_count)
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+}
+
+/* RxF */
+void
+bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ switch (q_config->rxp_type) {
+ case BNA_RXP_SINGLE:
+ /* No-op */
+ break;
+ case BNA_RXP_SLR:
+ rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
+ break;
+ case BNA_RXP_HDS:
+ rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
+ rxf->hds_cfg.header_size =
+ q_config->hds_config.header_size;
+ rxf->forced_offset = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
+ rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
+ memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
+ &q_config->rss_config.toeplitz_hash_key[0],
+ sizeof(rxf->rss_cfg.toeplitz_hash_key));
+ }
+}
+
+static void
+rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
+{
+ struct bfi_ll_rxf_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ req.enable = status;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+void
+__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ u32 ctrl_flags;
+ int i;
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ for (i = 0; i < BFI_MAX_RXF; i++) {
+ if (status == BNA_STATUS_T_ENABLED) {
+ if (i == rxf->rxf_id)
+ continue;
+
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ } else {
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ }
+ }
+}
+
+int
+rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add additional MAC entries */
+ if (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ return 1;
+ }
+
+ /* Delete MAC addresses previously added */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable promiscuous mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_PROMISC;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable default mode */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ /* Redirect all other RxF vlan filtering to this one */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* Enable/disable allmulti mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 1. delete pending ucast entries */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ /* 2. clear active ucast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Execute pending promisc mode disable command */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 7. Clear active promisc mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* move promisc configuration from active -> pending */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Execute pending default mode disable command */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 9. Clear active default mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* move default configuration from active -> pending */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Execute pending allmulti mode disable command */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 11. Clear active allmulti mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ /* move allmulti configuration from active -> pending */
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 1. Move active ucast entries to pending_add_q */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_add_q);
+ }
+
+ /* 2. Throw away delete pending ucast entries */
+ while (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+}
+
+void
+rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Clear pending promisc mode disable */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+ }
+
+ /* 7. Move promisc mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ }
+
+}
+
+void
+rxf_reset_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Clear pending default mode disable */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+ }
+
+ /* 9. Move default mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ }
+}
+
+void
+rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Clear pending allmulti mode disable */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+
+ /* 11. Move allmulti mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_promisc_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
+ /* Schedule enable */
+ } else {
+ /* Promisc mode should not be active in the system */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_promisc_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_promisc_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There cannot be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Promisc mode should not be active */
+ /* system promisc state should be pending */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the promisc state from the system */
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* Promisc mode should be active in the system */
+ promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+ /* Do nothing if already disabled */
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_default_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
+ /* Schedule enable */
+ } else {
+ /* Default mode should not be active in the system */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_default_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_default_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ /* There cannot be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Default mode should not be active */
+ /* system default state should be pending */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the default state from the system */
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* Default mode should be active in the system */
+ default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+ /* Do nothing if already disabled */
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_allmulti_enable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ /* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
+ /* Schedule enable */
+ } else {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_allmulti_disable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ /* There cannot be any pending disable */
+
+ /* Turn off pending enable command, if any */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Allmulti mode should not be active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* RxF <- bnad */
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+/* RxF <- Rx */
+void
+bna_rx_receive_resume(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
+ rxf->oper_state_cbfn = cbfn;
+ rxf->oper_state_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_RESUME);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+void
+bna_rx_receive_pause(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
+ rxf->oper_state_cbfn = cbfn;
+ rxf->oper_state_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Check if already added */
+ list_for_each(qe, &rxf->ucast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ /* Check if pending addition */
+ list_for_each(qe, &rxf->ucast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ list_for_each(qe, &rxf->ucast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ list_for_each(qe, &rxf->ucast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_del_q);
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ return BNA_CB_INVALID_MAC;
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int need_hw_config = 0;
+
+ /* Error checks */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ /* If promisc mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_promisc_id != rxf->rxf_id))
+ goto err_return;
+
+ /* If default mode is already enabled in the system */
+ if (rx->bna->rxf_default_id != BFI_MAX_RXF)
+ goto err_return;
+
+ /* Trying to enable promiscuous and default mode together */
+ if (is_default_enable(new_mode, bitmask))
+ goto err_return;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ /* If default mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_default_id != rxf->rxf_id)) {
+ goto err_return;
+ }
+
+ /* If promiscuous mode is already enabled in the system */
+ if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
+ goto err_return;
+ }
+
+ /* Process the commands */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ if (rxf_promisc_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_promisc_disable(new_mode, bitmask)) {
+ if (rxf_promisc_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ if (rxf_default_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_default_disable(new_mode, bitmask)) {
+ if (rxf_default_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_allmulti_enable(new_mode, bitmask)) {
+ if (rxf_allmulti_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_allmulti_disable(new_mode, bitmask)) {
+ if (rxf_allmulti_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ /* Trigger h/w if needed */
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ return BNA_CB_FAIL;
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_ENABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_DISABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_ENABLED;
+ rxf->rss_cfg = *rss_config;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_vlanfilter_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/* RxF <- bnad */
+void
+bna_rx_vlanfilter_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/* Rx */
+
+struct bna_rxp *
+bna_rx_get_rxp(struct bna_rx *rx, int vector)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ if (rxp->vector == vector)
+ return rxp;
+ }
+ return NULL;
+}
+
+/*
+ * bna_rx_rss_rit_set()
+ * Sets the RxQ ids for the specified MSI-X vectors in the RIT.
+ * The maximum supported RIT size is 64, which bounds the size of
+ * the vectors array.
+ */
+void
+bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
+{
+ int i;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct bna *bna;
+ struct bna_rxf *rxf;
+
+ /* Build the RIT contents for this RX */
+ bna = rx->bna;
+
+ rxf = &rx->rxf;
+ for (i = 0; i < nvectors; i++) {
+ rxp = bna_rx_get_rxp(rx, vectors[i]);
+
+ GET_RXQS(rxp, q0, q1);
+ rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
+ rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
+ }
+
+ rxf->rit_segment->rit_size = nvectors;
+
+ /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
+}
+
+/* Rx <- bnad */
+void
+bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
+ bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
+ }
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+{
+ int i, j;
+
+ for (i = 0; i < BNA_LOAD_T_MAX; i++)
+ for (j = 0; j < BNA_BIAS_T_MAX; j++)
+ bna->rx_mod.dim_vector[i][j] = vector[i][j];
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_update(struct bna_ccb *ccb)
+{
+ struct bna *bna = ccb->cq->rx->bna;
+ u32 load, bias;
+ u32 pkt_rt, small_rt, large_rt;
+ u8 coalescing_timeo;
+
+ if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
+ (ccb->pkt_rate.large_pkt_cnt == 0))
+ return;
+
+ /* Arrive at preconfigured coalescing timeo value based on pkt rate */
+
+ small_rt = ccb->pkt_rate.small_pkt_cnt;
+ large_rt = ccb->pkt_rate.large_pkt_cnt;
+
+ pkt_rt = small_rt + large_rt;
+
+ if (pkt_rt < BNA_PKT_RATE_10K)
+ load = BNA_LOAD_T_LOW_4;
+ else if (pkt_rt < BNA_PKT_RATE_20K)
+ load = BNA_LOAD_T_LOW_3;
+ else if (pkt_rt < BNA_PKT_RATE_30K)
+ load = BNA_LOAD_T_LOW_2;
+ else if (pkt_rt < BNA_PKT_RATE_40K)
+ load = BNA_LOAD_T_LOW_1;
+ else if (pkt_rt < BNA_PKT_RATE_50K)
+ load = BNA_LOAD_T_HIGH_1;
+ else if (pkt_rt < BNA_PKT_RATE_60K)
+ load = BNA_LOAD_T_HIGH_2;
+ else if (pkt_rt < BNA_PKT_RATE_80K)
+ load = BNA_LOAD_T_HIGH_3;
+ else
+ load = BNA_LOAD_T_HIGH_4;
+
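+ /* Bias is 0 when small packets outnumber large ones >2:1, else 1 */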
+ if (small_rt > (large_rt << 1))
+ bias = 0;
+ else
+ bias = 1;
+
+ ccb->pkt_rate.small_pkt_cnt = 0;
+ ccb->pkt_rate.large_pkt_cnt = 0;
+
+ coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
+ ccb->rx_coalescing_timeo = coalescing_timeo;
+
+ /* Set it to IB */
+ bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
+}
+
+/* Tx */
+/* TX <- bnad */
+enum bna_cb_status
+bna_tx_prio_set(struct bna_tx *tx, int prio,
+ void (*cbfn)(struct bnad *, struct bna_tx *,
+ enum bna_cb_status))
+{
+ if (tx->flags & BNA_TX_F_PRIO_LOCK)
+ return BNA_CB_FAIL;
+
+ tx->prio_change_cbfn = cbfn;
+ bna_tx_prio_changed(tx, prio);
+
+ return BNA_CB_SUCCESS;
+}
+
+/* TX <- bnad */
+void
+bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
+ }
+}
+
+/*
+ * Private data
+ */
+
+struct bna_ritseg_pool_cfg {
+ u32 pool_size;
+ u32 pool_entry_size;
+};
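+
+/*
+ * init_ritseg_pool() (defined in bna_hw.h) expands to a static
+ * ritseg_pool_cfg[] table of { pool_size, pool_entry_size } pairs.
+ */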
+init_ritseg_pool(ritseg_pool_cfg);
+
+/*
+ * Private functions
+ */
+static void
+bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ ucam_mod->ucmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ucam_mod->free_q);
+ for (i = 0; i < BFI_MAX_UCMAC; i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
+ }
+
+ ucam_mod->bna = bna;
+}
+
+static void
+bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &ucam_mod->free_q)
+ i++;
+
+ ucam_mod->bna = NULL;
+}
+
+static void
+bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ mcam_mod->mcmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_q);
+ for (i = 0; i < BFI_MAX_MCMAC; i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
+ }
+
+ mcam_mod->bna = bna;
+}
+
+static void
+bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &mcam_mod->free_q)
+ i++;
+
+ mcam_mod->bna = NULL;
+}
+
+static void
+bna_rit_mod_init(struct bna_rit_mod *rit_mod,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ int offset;
+
+ rit_mod->rit = (struct bna_rit_entry *)
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
+ rit_mod->rit_segment = (struct bna_rit_segment *)
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
+ for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
+ bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
+ rit_mod->rit_segment[count].max_rit_size =
+ ritseg_pool_cfg[i].pool_entry_size;
+ rit_mod->rit_segment[count].rit_offset = offset;
+ rit_mod->rit_segment[count].rit =
+ &rit_mod->rit[offset];
+ list_add_tail(&rit_mod->rit_segment[count].qe,
+ &rit_mod->rit_seg_pool[i]);
+ count++;
+ offset += ritseg_pool_cfg[i].pool_entry_size;
+ }
+ }
+}
+
+static void
+bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
+{
+ struct bna_rit_segment *rit_segment;
+ struct list_head *qe;
+ int i;
+ int j;
+
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
+ rit_segment = (struct bna_rit_segment *)qe;
+ j++;
+ }
+ }
+}
+
+/*
+ * Public functions
+ */
+
+/* Called during probe(), before calling bna_init() */
+void
+bna_res_req(struct bna_res_info *res_info)
+{
+ bna_adv_res_req(res_info);
+
+ /* DMA memory for retrieving IOC attributes */
+ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+ ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);
+
+ /* DMA memory for index segment of an IB */
+ res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
+ BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
+
+ /* Virtual memory for IB objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_ib);
+
+ /* Virtual memory for intr objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_intr);
+
+ /* Virtual memory for idx_seg objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
+ BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
+
+ /* Virtual memory for Tx objects - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_tx);
+
+ /* Virtual memory for TxQ - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_txq);
+
+ /* Virtual memory for Rx objects - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rx);
+
+ /* Virtual memory for RxPath - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxp);
+
+ /* Virtual memory for RxQ - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxq);
+
+ /* Virtual memory for Unicast MAC address - stored by ucam module */
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_UCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast MAC address - stored by mcam module */
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_MCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for RIT entries */
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
+ BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
+
+ /* Virtual memory for RIT segment table */
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
+ BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
+
+ /* Interrupt resource for mailbox interrupt */
+ res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
+}
+
+/* Called during probe() */
+void
+bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info)
+{
+ bna->bnad = bnad;
+ bna->pcidev = *pcidev;
+
+ bna->stats.hw_stats = (struct bfi_ll_stats *)
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
+ bna->hw_stats_dma.msb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
+ bna->hw_stats_dma.lsb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
+ bna->stats.sw_stats = (struct bna_sw_stats *)
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
+
+ bna->regs.page_addr = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].page_addr;
+ bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_status;
+ bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_mask;
+
+ if (bna->pcidev.pci_func < 3)
+ bna->port_num = 0;
+ else
+ bna->port_num = 1;
+
+ /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
+ bna_device_init(&bna->device, bna, res_info);
+
+ bna_port_init(&bna->port, bna);
+
+ bna_tx_mod_init(&bna->tx_mod, bna, res_info);
+
+ bna_rx_mod_init(&bna->rx_mod, bna, res_info);
+
+ bna_ib_mod_init(&bna->ib_mod, bna, res_info);
+
+ bna_rit_mod_init(&bna->rit_mod, res_info);
+
+ bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
+
+ bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+
+ bna->rxf_default_id = BFI_MAX_RXF;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Mbox q element for posting stat request to f/w */
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+}
+
+void
+bna_uninit(struct bna *bna)
+{
+ bna_mcam_mod_uninit(&bna->mcam_mod);
+
+ bna_ucam_mod_uninit(&bna->ucam_mod);
+
+ bna_rit_mod_uninit(&bna->rit_mod);
+
+ bna_ib_mod_uninit(&bna->ib_mod);
+
+ bna_rx_mod_uninit(&bna->rx_mod);
+
+ bna_tx_mod_uninit(&bna->tx_mod);
+
+ bna_port_uninit(&bna->port);
+
+ bna_device_uninit(&bna->device);
+
+ bna->bnad = NULL;
+}
+
+struct bna_mac *
+bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&ucam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&ucam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &ucam_mod->free_q);
+}
+
+struct bna_mac *
+bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &mcam_mod->free_q);
+}
+
+/**
+ * Note: This should be called in the same locking context as the call to
+ * bna_rit_mod_seg_get()
+ */
+int
+bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return 0;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return 0;
+
+ return 1;
+}
+
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ struct bna_rit_segment *seg;
+ struct list_head *qe;
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return NULL;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return NULL;
+
+ bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
+ seg = (struct bna_rit_segment *)qe;
+ bfa_q_qe_init(&seg->qe);
+ seg->rit_size = seg_size;
+
+ return seg;
+}
+
+void
+bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg)
+{
+ int i;
+
+ /* Select the pool for seg->max_rit_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ seg->rit_size = 0;
+ list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * File for interrupt macros and functions
+ */
+
+#ifndef __BNA_HW_H__
+#define __BNA_HW_H__
+
+#include "bfi_ctreg.h"
+
+/**
+ *
+ * SW imposed limits
+ *
+ */
+
+#ifndef BNA_BIOS_BUILD
+
+#define BFI_MAX_TXQ 64
+#define BFI_MAX_RXQ 64
+#define BFI_MAX_RXF 64
+#define BFI_MAX_IB 128
+#define BFI_MAX_RIT_SIZE 256
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 256
+#define BFI_MAX_MCMAC 512
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+
+/**
+ * There are 3 free IB index pools:
+ * pool1: 116 segments of 1 index each
+ * pool2: 2 segments of 2 indexes each
+ * pool8: 1 segment of 8 indexes
+ */
+#define BFI_IBIDX_POOL1_SIZE 116
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_POOL2_SIZE 2
+#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
+#define BFI_IBIDX_POOL8_SIZE 1
+#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
+#define BFI_IBIDX_TOTAL_POOLS 3
+#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 8
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
+}
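+
+/*
+ * Usage at file scope, e.g. init_ibidx_pool(ibidx_pool);
+ * ("ibidx_pool" is an illustrative name) expands to a static
+ * 3-entry array of { pool_size, pool_entry_size } pairs, one
+ * per pool above.
+ */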
+
+/**
+ * There are 2 free RIT segment pools:
+ * Pool1: 192 segments of 1 RIT entry each
+ * PoolRSS: 1 segment of 64 RIT entries
+ */
+#define BFI_RIT_SEG_POOL1_SIZE 192
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
+#define BFI_RIT_SEG_TOTAL_POOLS 2
+#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
+ { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
+}
+
+#else /* BNA_BIOS_BUILD */
+
+#define BFI_MAX_TXQ 1
+#define BFI_MAX_RXQ 1
+#define BFI_MAX_RXF 1
+#define BFI_MAX_IB 2
+#define BFI_MAX_RIT_SIZE 2
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 1
+#define BFI_MAX_MCMAC 8
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+/* There is one free pool: 2 segments of 1 index each */
+#define BFI_IBIDX_POOL1_SIZE 2
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_TOTAL_POOLS 1
+#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 1
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
+}
+
+#define BFI_RIT_SEG_POOL1_SIZE 1
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_TOTAL_POOLS 1
+#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
+}
+
+#endif /* BNA_BIOS_BUILD */
+
+#define BFI_RSS_HASH_KEY_LEN 10
+
+#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
+#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
+#define BFI_MAX_INTERPKT_COUNT 0xFF
+#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
+#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
+#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
+#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
+#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
+
+#define BFI_TXQ_WI_SIZE 64 /* bytes */
+#define BFI_RXQ_WI_SIZE 8 /* bytes */
+#define BFI_CQ_WI_SIZE 16 /* bytes */
+#define BFI_TX_MAX_WRR_QUOTA 0xFFF
+
+#define BFI_TX_MAX_VECTORS_PER_WI 4
+#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
+#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
+#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
+
+/* Small Q buffer size */
+#define BFI_SMALL_RXBUF_SIZE 128
+
+/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
+#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
+#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
+
+/**
+ *
+ * HW register offsets, macros
+ *
+ */
+
+/* DMA Block Register Host Window Start Address */
+#define DMA_BLK_REG_ADDR 0x00013000
+
+/* DMA Block Internal Registers */
+#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
+#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
+#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
+#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
+#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
+
+/* APP Block Register Address Offset from BAR0 */
+#define APP_BLK_REG_ADDR 0x00014000
+
+/* Host Function Interrupt Mask Registers */
+#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
+#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
+#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
+#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
+
+/**
+ * Host Function PCIe Error Registers
+ * Duplicates "Correctable" & "Uncorrectable"
+ * registers in PCIe Config space.
+ */
+#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
+#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
+#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
+#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
+
+/* Host Function Error Type Status Registers */
+#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
+#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
+#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
+#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
+
+/* Host Function Error Type Mask Registers */
+#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
+#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
+#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
+#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
+
+/* Catapult Host Semaphore Status Registers (App block) */
+#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
+#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
+#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
+#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
+#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
+#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
+#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
+#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
+
+/* PCIe Misc Register */
+#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
+
+/* Temp Sensor Control Registers */
+#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
+#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
+
+/* APP Block local error registers */
+#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
+#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
+
+/* PCIe Link Error registers */
+#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
+#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
+
+/**
+ * FCoE/FIP Ethertype Register
+ * 31:16 -- Chip wide value for FIP type
+ * 15:0 -- Chip wide value for FCoE type
+ */
+#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
+
+/**
+ * Reserved Ethertype Register
+ * 31:16 -- Reserved
+ * 15:0 -- Other ethertype
+ */
+#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
+
+/**
+ * Host Command Status Registers
+ * Each set consists of 3 registers :
+ * clear, set, cmd
+ * 16 such register sets in all
+ * See catapult_spec.pdf for detailed functionality
+ * TODO: fold each register type into a single macro indexed by _num?
+ */
+#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
+#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
+#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
+#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
+#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
+#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
+#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
+#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
+#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
+#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
+#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
+#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
+#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
+#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
+#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
+#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
+#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
+#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
+#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
+#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
+#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
+#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
+#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
+#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
+#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
+#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
+#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
+#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
+#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
+#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
+#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
+#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
+#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
+#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
+#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
+#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
+#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
+#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
+#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
+#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
+#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
+#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
+#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
+#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
+#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
+#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
+#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
+#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
+
+/**
+ * LPU0 Block Register Address Offset from BAR0
+ * Range 0x18000 - 0x18033
+ */
+#define LPU0_BLK_REG_ADDR 0x00018000
+
+/**
+ * LPU0 Registers
+ * These should not be accessed directly from the host,
+ * except for diagnostics.
+ * CTL_REG : Control register
+ * CMD_REG : Triggers exec. of cmd. in
+ * Mailbox memory
+ */
+#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
+#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
+#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
+#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
+
+/**
+ * LPU1 Block Register Address Offset from BAR0
+ * Range 0x18400 - 0x18433
+ */
+#define LPU1_BLK_REG_ADDR 0x00018400
+
+/**
+ * LPU1 Registers
+ * Same as LPU0 registers above
+ */
+#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
+#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
+#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
+#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
+
+/**
+ * PSS Block Register Address Offset from BAR0
+ * Range 0x18800 - 0x188DB
+ */
+#define PSS_BLK_REG_ADDR 0x00018800
+
+/**
+ * PSS Registers
+ * For details, see catapult_spec.pdf
+ * ERR_STATUS_REG : Indicates error in PSS module
+ * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
+ */
+#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
+#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
+
+/**
+ * PSS Semaphore Lock Registers, total 16
+ * The first read when unlocked returns 0
+ * and atomically sets the lock to 1.
+ * Subsequent reads return 1.
+ * To unlock, write 0 to the register.
+ * Range : 0x20 to 0x5c
+ */
+#define PSS_SEM_LOCK_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
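+
+/*
+ * A minimal sketch (hypothetical helpers, not part of the h/w spec)
+ * of the read-to-acquire semantics described above; 'bar0' is assumed
+ * to be the ioremapped BAR0 base.
+ */
+static inline int bna_pss_sem_trylock(void __iomem *bar0, int num)
+{
+	/* The first read returns 0 and atomically sets the lock to 1 */
+	return readl(bar0 + PSS_SEM_LOCK_REG(num)) == 0;
+}
+
+static inline void bna_pss_sem_unlock(void __iomem *bar0, int num)
+{
+	writel(0, bar0 + PSS_SEM_LOCK_REG(num));
+}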
+
+/**
+ * PSS Semaphore Status Registers,
+ * corresponding to the lock registers above
+ */
+#define PSS_SEM_STATUS_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
+
+/**
+ * Catapult CPQ Registers
+ * Defines for Mailbox Registers
+ * Used to send mailbox commands to firmware from
+ * the host. The data part is written to the MBox
+ * memory; the registers are used to indicate that
+ * a command is resident in memory.
+ *
+ * Note : LPU0<->LPU1 mailboxes are not listed here
+ */
+#define CPQ_BLK_REG_ADDR 0x00019000
+
+#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
+#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
+#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
+#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
+
+#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
+#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
+#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
+#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
+
+#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
+#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
+#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
+#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
+
+#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
+#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
+#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
+#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
+
+/* Host Function Force Parity Error Registers */
+#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
+#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
+#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
+#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
+
+/* LL Port[0|1] Halt Mask Registers */
+#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
+#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
+
+/* LL Port[0|1] Error Mask Registers */
+#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
+#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
+
+/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
+#define FLI_BLK_REG_ADDR 0x0001D000
+
+/* EMC FLI Registers */
+#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
+#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
+#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
+#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
+#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
+#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
+#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
+
+/**
+ * RO register
+ * 31:16 -- Vendor Id
+ * 15:0 -- Device Id
+ */
+#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
+#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
+
+/**
+ * RAD (RxAdm) Block Register Address Offset from BAR0
+ * RAD0 Range : 0x20000 - 0x203FF
+ * RAD1 Range : 0x20400 - 0x207FF
+ */
+#define RAD0_BLK_REG_ADDR 0x00020000
+#define RAD1_BLK_REG_ADDR 0x00020400
+
+/* RAD0 Registers */
+#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
+#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
+#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
+
+/* Default promiscuous ID register */
+#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
+
+#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM queues, using
+ * VLAN priority, for non-BCN packets without a VLAN tag
+ */
+#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
+
+#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
+#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
+#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
+#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
+#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
+
+#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
+#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
+#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
+#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
+#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
+#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
+#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
+#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
+#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
+#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
+#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
+#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
+#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
+#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
+#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
+#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
+#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
+
+#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
+#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
+#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
+#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
+#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
+#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
+#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
+#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
+
+#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
+
+/* RAD1 Registers */
+#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
+#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
+#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
+
+/* Promiscuous function ID register */
+#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
+
+#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM queues, using
+ * VLAN priority, for non-BCN packets without a VLAN tag
+ */
+#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
+
+#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
+#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
+#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
+
+/**
+ * TXA Block Register Address Offset from BAR0
+ * TXA0 Range : 0x21000 - 0x213FF
+ * TXA1 Range : 0x21400 - 0x217FF
+ */
+#define TXA0_BLK_REG_ADDR 0x00021000
+#define TXA1_BLK_REG_ADDR 0x00021400
+
+/* TXA Registers */
+#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
+#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
+
+/**
+ * TSO Sequence # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last seq.# for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_TCP_SEQ_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+#define TXA1_TSO_TCP_SEQ_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+/**
+ * TSO IP ID # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last IP ID for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_IP_INFO_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+#define TXA1_TSO_IP_INFO_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * RXA Block Register Address Offset from BAR0
+ * RXA0 Range : 0x21800 - 0x21BFF
+ * RXA1 Range : 0x21C00 - 0x21FFF
+ */
+#define RXA0_BLK_REG_ADDR 0x00021800
+#define RXA1_BLK_REG_ADDR 0x00021C00
+
+/* RXA Registers */
+#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
+#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
+
+/**
+ * PPLB Block Register Address Offset from BAR0
+ * PPLB0 Range : 0x22000 - 0x223FF
+ * PPLB1 Range : 0x22400 - 0x227FF
+ */
+#define PLB0_BLK_REG_ADDR 0x00022000
+#define PLB1_BLK_REG_ADDR 0x00022400
+
+/**
+ * PLB Registers
+ * Holds the RL timer timestamps used in RLT-tagged frames
+ */
+#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
+#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
+
+/* Controls the rate-limiter on each of the priority classes */
+#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
+#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
+
+/**
+ * Max byte register, total 8, 0-7
+ * see catapult_spec.pdf for details
+ */
+#define PLB0_RL_MAX_BC(_num) \
+ (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+#define PLB1_RL_MAX_BC(_num) \
+ (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+
+/**
+ * RL Time Unit Register for priority 0-7
+ * 4 bits per priority
+ * (2^rl_unit)*1us is the actual time period
+ */
+#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
+#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
+
+/**
+ * RL byte count registers:
+ * bytes transmitted in the (rl_unit*1)us time period.
+ * One per priority, 8 in all, 0-7.
+ */
+#define PLB0_RL_BYTE_CNT(_num) \
+ (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+#define PLB1_RL_BYTE_CNT(_num) \
+ (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+
+/**
+ * RL Min factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
+#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
+
+/**
+ * RL Max factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
+#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
+
+/* MAC SERDES Address Paging register */
+#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
+#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
+
+/* LL EMS Registers */
+#define LL_EMS0_BLK_REG_ADDR 0x00026800
+#define LL_EMS1_BLK_REG_ADDR 0x00026C00
+
+/**
+ * BPC Block Register Address Offset from BAR0
+ * BPC0 Range : 0x23000 - 0x233FF
+ * BPC1 Range : 0x23400 - 0x237FF
+ */
+#define BPC0_BLK_REG_ADDR 0x00023000
+#define BPC1_BLK_REG_ADDR 0x00023400
+
+/**
+ * PMM Block Register Address Offset from BAR0
+ * PMM0 Range : 0x23800 - 0x23BFF
+ * PMM1 Range : 0x23C00 - 0x23FFF
+ */
+#define PMM0_BLK_REG_ADDR 0x00023800
+#define PMM1_BLK_REG_ADDR 0x00023C00
+
+/**
+ * HQM Block Register Address Offset from BAR0
+ * HQM0 Range : 0x24000 - 0x243FF
+ * HQM1 Range : 0x24400 - 0x247FF
+ */
+#define HQM0_BLK_REG_ADDR 0x00024000
+#define HQM1_BLK_REG_ADDR 0x00024400
+
+/**
+ * HQM Control Register
+ * Controls some aspects of IB
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
+#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
+
+/**
+ * HQM Stop Q Semaphore Registers.
+ * Only one Queue resource can be stopped at
+ * any given time. This register controls access
+ * to the single stop Q resource.
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
+#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
+#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
+#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
+
+/**
+ * LUT Block Register Address Offset from BAR0
+ * LUT0 Range : 0x25800 - 0x25BFF
+ * LUT1 Range : 0x25C00 - 0x25FFF
+ */
+#define LUT0_BLK_REG_ADDR 0x00025800
+#define LUT1_BLK_REG_ADDR 0x00025C00
+
+/**
+ * LUT Registers
+ * See catapult_spec.pdf for details
+ */
+#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
+#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
+#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
+#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
+
+/**
+ * TRC (Debug/Trace) Register Offset from BAR0
+ * Range : 0x26000 -- 0x263FF
+ */
+#define TRC_BLK_REG_ADDR 0x00026000
+
+/**
+ * TRC Registers
+ * See catapult_spec.pdf for details of each
+ */
+#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
+#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
+#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
+#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
+#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
+#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
+#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
+
+/**
+ * TRC Trigger match filters, total 10
+ * Determines the trigger condition
+ */
+#define TRC_TRGM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * TRC Next State filters, total 10
+ * Determines the next state conditions
+ */
+#define TRC_NXTM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
+
+/**
+ * TRC Store Match filters, total 10
+ * Determines the store conditions
+ */
+#define TRC_STRM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
+
+/* DOORBELLS ACCESS */
+
+/**
+ * Catapult doorbells
+ * Each doorbell-queue set has
+ * 1 RxQ, 1 TxQ, 2 IBs in that order
+ * The size of each entry is 32 bytes, even though only 1 word
+ * is used. For the Non-VM case each doorbell-q set is
+ * separated by 128 bytes; for the VM case it is separated
+ * by 4K bytes
+ * Non VM case Range : 0x38000 - 0x39FFF
+ * VM case Range : 0x100000 - 0x11FFFF
+ * The range applies to both HQMs
+ */
+#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
+#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
+
+/* MEMORY ACCESS */
+
+/**
+ * Catapult H/W Block Memory Access Address
+ * To the host a memory space of 32K (page) is visible
+ * at a time. The address range is from 0x08000 to 0x0FFFF
+ */
+#define HW_BLK_HOST_MEM_ADDR 0x08000
+
+/**
+ * Catapult LUT Memory Access Page Numbers
+ * Range : LUT0 0xa0-0xa1
+ * LUT1 0xa2-0xa3
+ */
+#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
+#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
+
+/**
+ * Catapult RxFn Database Memory Block Base Offset
+ *
+ * The Rx function database exists in LUT block.
+ * In PCIe space this is accessible as a 256x32
+ * bit block. Each entry in this database is 4
+ * (4 byte) words. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 16)
+ */
+#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
+
+/**
+ * Catapult TxFn Database Memory Block Base Offset Address
+ *
+ * The Tx function database exists in LUT block.
+ * In PCIe space this is accessible as a 64x32
+ * bit block. Each entry in this database is 1
+ * (4 byte) word. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 4)
+ */
+#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
+
+/**
+ * Catapult Unicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space as
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define UCAST_CAM_BASE_OFFSET 0x0000A800
+
+/**
+ * Catapult Unicast RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x9 bits.
+ */
+#define UCAST_RAM_BASE_OFFSET 0x0000B000
+
+/**
+ * Catapult Multicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space as
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define MCAST_CAM_BASE_OFFSET 0x0000A000
+
+/**
+ * Catapult VLAN RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 4096x66 bits; mapped to PCIe space as
+ * 8192x32 bit blocks.
+ * All the 4K entries are within the address range
+ * 0x0000 to 0x8000, so in the first LUT page.
+ */
+#define VLAN_RAM_BASE_OFFSET 0x00000000
+
+/**
+ * Catapult Tx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Tx function has 64 bytes of space
+ */
+#define TX_STATS_RAM_BASE_OFFSET 0x00009000
+
+/**
+ * Catapult Rx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Rx function has 64 bytes of space
+ */
+#define RX_STATS_RAM_BASE_OFFSET 0x00008000
+
+/* Catapult RXA Memory Access Page Numbers */
+#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
+#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
+
+/**
+ * Catapult Multicast Vector Table Base Offset Address
+ *
+ * Exists in RxA memory space.
+ * Organized as a 512x65 bit block.
+ * However, 16 bytes (a power of 2) are allocated for each entry,
+ * for a total size of 512*16 bytes.
+ * There are two logical divisions, 256 entries each :
+ * a) Entries 0x00 to 0xff (256) -- Approx. MVT
+ * Offset 0x000 to 0xFFF
+ * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
+ * Offsets 0x1000 to 0x1FFF
+ */
+#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
+#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
+
+/**
+ * Catapult RxQ Translate Table (RIT) Base Offset Address
+ *
+ * Exists in RxA memory space
+ * Total no. of entries 64
+ * Each entry is 1 (4 byte) word.
+ * 31:12 -- Reserved
+ * 11:0 -- Two 6 bit RxQ Ids
+ */
+#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
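+
+/*
+ * Example encoding (illustrative values): an entry pairing large
+ * RxQ id 5 with small RxQ id 3 is written as (5 << 6) | 3 = 0x143
+ * in bits 11:0; see __rxf_rit_set() for the actual programming.
+ */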
+
+/* Catapult RxAdm (RAD) Memory Access Page Numbers */
+#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
+#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
+
+/**
+ * Catapult RSS Table Base Offset Address
+ *
+ * Exists in RAD memory space.
+ * Each entry is 352 bits, but aligned on a
+ * 64 byte (512 bit) boundary. Accessed as
+ * 4 byte words, the whole entry can be
+ * read in 11 word accesses.
+ */
+#define RSS_TABLE_BASE_OFFSET 0x00000800
+
+/**
+ * Catapult CPQ Block Page Number
+ * This value is written to the page number registers
+ * to access the memory associated with the mailboxes.
+ */
+#define CPQ_BLK_PG_NUM 0x00000005
+
+/**
+ * Clarification :
+ * LL functions are 2 & 3; can the HostFn0/HostFn1
+ * <-> LPU0/LPU1 memories be used?
+ */
+/**
+ * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
+ * Per catapult_spec.pdf, the offset of the mbox
+ * memory is in the register space at an offset of 0x200
+ */
+#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
+
+#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
+
+/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
+#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
+
+/**
+ * Catapult HQM Block Page Number
+ * This is written to the page number register for
+ * the appropriate function to access the memory
+ * associated with HQM
+ */
+#define HQM0_BLK_PG_NUM 0x00000096
+#define HQM1_BLK_PG_NUM 0x00000097
+
+/**
+ * Note that TxQ and RxQ entries are interleaved in
+ * the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
+ */
+
+#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
+
+/**
+ * CQ Memory
+ * Exists in HQM Memory space
+ * Each entry is 16 (4 byte) words of which
+ * only 12 words are used for configuration
+ * Total 64 entries per HQM memory space
+ */
+#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
+
+/**
+ * Interrupt Block (IB) Memory
+ * Exists in HQM Memory space
+ * Each entry is 8 (4 byte) words of which
+ * only 5 words are used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_IB_RAM_BASE_OFFSET 0x00001000
+
+/**
+ * Index Table (IT) Memory
+ * Exists in HQM Memory space
+ * Each entry is 1 (4 byte) word which
+ * is used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
+
+/**
+ * PSS Block Memory Page Number
+ * This is written to the appropriate page number
+ * register to access the CPU memory.
+ * Also known as the PSS secondary memory (SMEM).
+ * Range : 0x180 to 0x1CF
+ * See catapult_spec.pdf for details
+ */
+#define PSS_BLK_PG_NUM 0x00000180
+
+/**
+ * Offsets of different instances of PSS SMEM
+ * 2.5M of contiguous 1T memory space : 2 blocks
+ * of 1M each (32 pages each, page=32KB) and 4 smaller
+ * blocks of 128K each (4 pages each, page=32KB)
+ * PSS_LMEM_INST0 is used for firmware download
+ */
+#define PSS_LMEM_INST0 0x00000000
+#define PSS_LMEM_INST1 0x00100000
+#define PSS_LMEM_INST2 0x00200000
+#define PSS_LMEM_INST3 0x00220000
+#define PSS_LMEM_INST4 0x00240000
+#define PSS_LMEM_INST5 0x00260000
+
+#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
+
+#define BNA_GET_PAGE_NUM(_base_page, _offset) \
+ ((_base_page) + ((_offset) >> 15))
+
+#define BNA_GET_PAGE_OFFSET(_offset) \
+ ((_offset) & 0x7fff)
+
+#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
+ ((_bar0) + HW_BLK_HOST_MEM_ADDR \
+ + BNA_GET_PAGE_OFFSET((_base_offset)))
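+
+/*
+ * Worked example (illustrative): LUT byte offset 0xB400
+ * (RX_FNDB_RAM_BASE_OFFSET) lies in the second 32KB page, so
+ * BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM, 0xB400) = 0xA0 + 1 = 0xA1
+ * and BNA_GET_PAGE_OFFSET(0xB400) = 0x3400; after writing 0xA1 to the
+ * page-address register, the data is accessed at
+ * BNA_GET_MEM_BASE_ADDR(bar0, 0xB400).
+ */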
+
+#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
+ (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
+ + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
+ + (((_fn_id) & 0x3f) << 9) \
+ + (((_vlan_id) & 0xfe0) >> 3))
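+
+/*
+ * Worked example (illustrative values): for function id 2 and VLAN
+ * id 100 the entry address is bar0 + 0x8000 + (2 << 9) +
+ * ((100 & 0xfe0) >> 3) = bar0 + 0x8000 + 0x400 + 0xC, i.e. word
+ * 100/32 = 3 of function 2's VLAN bitmap (VLAN_RAM_BASE_OFFSET is 0,
+ * so its page offset term vanishes).
+ */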
+
+/**
+ *
+ * Interrupt related bits, flags and macros
+ *
+ */
+
+#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
+#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
+#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
+#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
+
+#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
+#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
+#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
+#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
+
+#define __LPU2HOST_MBOX_MASK_BITS \
+ (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
+ __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
+
+#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
+
+#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_MBOX_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define __EMC_ERROR_STATUS_BITS 0x00010000
+#define __LPU0_ERROR_STATUS_BITS 0x00020000
+#define __LPU1_ERROR_STATUS_BITS 0x00040000
+#define __PSS_ERROR_STATUS_BITS 0x00080000
+
+#define __HALT_STATUS_BITS 0x01000000
+
+#define __EMC_ERROR_MASK_BITS 0x00010000
+#define __LPU0_ERROR_MASK_BITS 0x00020000
+#define __LPU1_ERROR_MASK_BITS 0x00040000
+#define __PSS_ERROR_MASK_BITS 0x00080000
+
+#define __HALT_MASK_BITS 0x01000000
+
+#define __ERROR_MASK_BITS \
+ (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
+ __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
+ __HALT_MASK_BITS)
+
+#define BNA_IS_ERR_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS))
+
+#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
+ (BNA_IS_MBOX_INTR((_intr_status)) | \
+ BNA_IS_ERR_INTR((_intr_status)))
+
+#define BNA_IS_INTX_DATA_INTR(_intr_status) \
+ ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
+
+#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS); \
+} while (0)
+
+#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS); \
+} while (0)
+
+#define bna_intx_disable(_bna, _cur_mask) \
+{ \
+ (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
+ writel(0xffffffff, (_bna)->regs.fn_int_mask);\
+}
+
+#define bna_intx_enable(bna, new_mask) \
+ writel((new_mask), (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_disable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) | \
+ (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_enable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) & \
+ ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_intr_status_get(_bna, _status) \
+{ \
+ (_status) = readl((_bna)->regs.fn_int_status); \
+ if ((_status)) { \
+ writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
+ __LPU02HOST_MBOX1_STATUS_BITS |\
+ __LPU12HOST_MBOX0_STATUS_BITS |\
+ __LPU12HOST_MBOX1_STATUS_BITS), \
+ (_bna)->regs.fn_int_status);\
+ } \
+}
+
+#define bna_intr_status_get_no_clr(_bna, _status) \
+ (_status) = readl((_bna)->regs.fn_int_status)
+
+#define bna_intr_mask_get(bna, mask) \
+ (*mask) = readl((bna)->regs.fn_int_mask)
+
+#define bna_intr_ack(bna, intr_bmap) \
+ writel((intr_bmap), (bna)->regs.fn_int_status)
+
+#define bna_ib_intx_disable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) | \
+ (1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_ib_intx_enable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) & \
+ ~(1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_msix_idx_set(_device) \
+do {\
+ writel(((_device)->vector & 0x000001FF), \
+ (_device)->bna->pcidev.pci_bar_kva + \
+ reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
+} while (0)
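+
+/*
+ * A minimal INTx service sketch (illustrative; the real handler lives
+ * in bnad) combining the helpers above:
+ *
+ *	u32 intr_status;
+ *
+ *	bna_intr_status_get(bna, intr_status);
+ *	if (!intr_status)
+ *		return IRQ_NONE;	(shared interrupt, not ours)
+ *	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ *		... handle mailbox and error events ...
+ *	if (BNA_IS_INTX_DATA_INTR(intr_status))
+ *		... schedule NAPI polling, ack via the IB doorbell ...
+ */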
+
+/**
+ *
+ * TxQ, RxQ, CQ related bits, offsets, macros
+ *
+ */
+
+#define BNA_Q_IDLE_STATE 0x00008001
+
+#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
+ ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
+
+#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
+ ((HQM_DOORBELL_BLK_BASE_ADDR) \
+	+ ((_entry) << 7))
+
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
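+
+/*
+ * A minimal sketch (hypothetical helper name): acknowledge 'events'
+ * completions and re-arm the IB with a coalescing timeout through
+ * its doorbell, per the encoding above.
+ */
+static inline void
+bna_ib_doorbell_ack(void __iomem *doorbell, u32 timeout, u32 events)
+{
+	writel(BNA_DOORBELL_IB_INT_ACK(timeout, events), doorbell);
+}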
+
+/* TxQ Entry Opcodes */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
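+
+/*
+ * Example (hypothetical values): for an L4 header size of 20 at
+ * offset 34, BNA_TXQ_WI_L4_HDR_N_OFFSET(20, 34) packs to
+ * (20 << 10) | 34 = 0x5022.
+ */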
+
+/*
+ * Completion Q defines
+ */
+/* CQ Entry Flags */
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BNA_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
+#define BNA_CQ_EF_IPV6 (1 << 11)
+
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
+
+#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
+
+#define BNA_CQ_EF_LOCAL (1 << 20)
+
+/**
+ *
+ * Data structures
+ *
+ */
+
+enum txf_flags {
+ BFI_TXF_CF_ENABLE = 1 << 0,
+ BFI_TXF_CF_VLAN_FILTER = 1 << 8,
+ BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
+ BFI_TXF_CF_VLAN_INSERT = 1 << 10,
+ BFI_TXF_CF_RSVD1 = 1 << 11,
+ BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
+ BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
+ BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
+ BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
+ BFI_TXF_CF_RSVD2 = 0x7F << 1
+};
+
+enum ib_flags {
+ BFI_IB_CF_MASTER_ENABLE = (1 << 0),
+ BFI_IB_CF_MSIX_MODE = (1 << 1),
+ BFI_IB_CF_COALESCING_MODE = (1 << 2),
+ BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
+ BFI_IB_CF_INT_ENABLE = (1 << 4),
+ BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
+ BFI_IB_CF_ACK_PENDING = (1 << 6),
+ BFI_IB_CF_RESERVED1 = (1 << 7)
+};
+
+enum rss_hash_type {
+ BFI_RSS_T_V4_TCP = (1 << 11),
+ BFI_RSS_T_V4_IP = (1 << 10),
+ BFI_RSS_T_V6_TCP = (1 << 9),
+ BFI_RSS_T_V6_IP = (1 << 8)
+};
+
+enum hds_header_type {
+ BNA_HDS_T_V4_TCP = (1 << 11),
+ BNA_HDS_T_V4_UDP = (1 << 10),
+ BNA_HDS_T_V6_TCP = (1 << 9),
+ BNA_HDS_T_V6_UDP = (1 << 8),
+ BNA_HDS_FORCED = (1 << 7),
+};
+
+enum rxf_flags {
+ BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
+ BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
+ BNA_RXF_CF_VLAN_STRIP = (1 << 12),
+ BNA_RXF_CF_RSS_ENABLE = (1 << 8)
+};
+
+struct bna_chip_regs_offset {
+ u32 page_addr;
+ u32 fn_int_status;
+ u32 fn_int_mask;
+ u32 msix_idx;
+};
+extern const struct bna_chip_regs_offset reg_offset[];
+
+struct bna_chip_regs {
+ void __iomem *page_addr;
+ void __iomem *fn_int_status;
+ void __iomem *fn_int_mask;
+};
+
+struct bna_txq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
+ u32 nxt_qid_n_fid_n_pri; /* 17:10->next */
+ /* QId;9:3->FID;2:0->Priority */
+ u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
+ /* 23:12->Cfg Quota; */
+ /* 11:0 ->Run Quota */
+ u32 reserved3[4];
+};
+
+struct bna_rxq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
+ /* 23:16->CQ; */
+ /* 15:0->consumer pointer(index?) */
+ u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
+ u32 next_qid; /* 17:10->next QId */
+ u32 reserved3;
+ u32 reserved4[4];
+};
+
+struct bna_rxtx_q_mem {
+ struct bna_rxq_mem rxq;
+ struct bna_txq_mem txq;
+};
+
+struct bna_cq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 q_state; /* 31:16->reserved; 15:0-> Q state */
+ u32 reserved3[2];
+ u32 reserved4[4];
+};
+
+struct bna_ib_blk_mem {
+ u32 host_addr_lo;
+ u32 host_addr_hi;
+ u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
+ /* 23:16->coalescing cfg; */
+ /* 15:8 ->control; */
+ /* 7:0 ->msix; */
+ u32 ipkt_n_ent_n_idxof;
+ u32 ipkt_cnt_cfg_n_unacked;
+
+ u32 reserved[3];
+};
+
+struct bna_idx_tbl_mem {
+ u32 idx; /* !< 31:16->res;15:0->idx; */
+};
+
+struct bna_doorbell_qset {
+ u32 rxq[0x20 >> 2];
+ u32 txq[0x20 >> 2];
+ u32 ib0[0x20 >> 2];
+ u32 ib1[0x20 >> 2];
+};
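+
+/*
+ * Layout note: in the Non-VM case queue set N starts at
+ * HQM_DOORBELL_BLK_BASE_ADDR + N * 128, with the RxQ, TxQ, IB0 and
+ * IB1 doorbells at +0x00, +0x20, +0x40 and +0x60 within the set.
+ */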
+
+struct bna_rx_fndb_ram {
+ u32 rss_prop;
+ u32 size_routing_props;
+ u32 rit_hds_mcastq;
+ u32 control_flags;
+};
+
+struct bna_tx_fndb_ram {
+ u32 vlan_n_ctrl_flags;
+};
+
+/**
+ * @brief
+ * Structure which maps to RxFn Indirection Table (RIT)
+ * Size : 1 word
+ * See catapult_spec.pdf, RxA for details
+ */
+struct bna_rit_mem {
+ u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
+};
+
+/**
+ * @brief
+ * Structure which maps to RSS Table entry
+ * Size : 16 words
+ * See catapult_spec.pdf, RAD for details
+ */
+struct bna_rss_mem {
+ /*
+ * 31:12-> res
+ * 11:8 -> protocol type
+ * 7:0 -> hash index
+ */
+ u32 type_n_hash;
+ u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
+ u32 reserved[5];
+};
+
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+struct bna_dma_addr {
+ u32 msb;
+ u32 lsb;
+};
+
+struct bna_txq_wi_vector {
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
+};
+
+typedef u16 bna_txq_wi_opcode_t;
+
+typedef u16 bna_txq_wi_ctrl_flag_t;
+
+/**
+ * TxQ Entry Structure
+ *
+ * BEWARE: Load values into this structure with the correct endianness.
+ */
+struct bna_txq_entry {
+ union {
+ struct {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ bna_txq_wi_opcode_t opcode; /* Either */
+ /* BNA_TXQ_WI_SEND or */
+ /* BNA_TXQ_WI_SEND_LSO */
+ bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+ } wi;
+
+ struct {
+ u16 reserved;
+ bna_txq_wi_opcode_t opcode; /* Must be */
+ /* BNA_TXQ_WI_EXTENSION */
+ u32 reserved2[3]; /* Place holder for */
+ /* removed vector (12 bytes) */
+ } wi_ext;
+ } hdr;
+ struct bna_txq_wi_vector vector[4];
+};
+#define wi_hdr hdr.wi
+#define wi_ext_hdr hdr.wi_ext
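+
+/*
+ * A minimal sketch (illustrative; real usage lives in the bnad
+ * transmit path) of filling a single-frame send WI, honoring the
+ * endianness warning above:
+ *
+ *	struct bna_txq_entry *txqent;	(next producer-index entry)
+ *
+ *	txqent->wi_hdr.opcode = htons(BNA_TXQ_WI_SEND);
+ *	txqent->wi_hdr.num_vectors = 1;
+ *	txqent->wi_hdr.flags = htons(BNA_TXQ_WI_CF_IP_CKSUM |
+ *				BNA_TXQ_WI_CF_TCP_CKSUM);
+ *	txqent->wi_hdr.frame_length = htonl(frame_len);
+ *	txqent->vector[0].length = htons(frame_len);
+ *	(then load the DMA address into vector[0].host_addr.msb/lsb)
+ */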
+
+/* RxQ Entry Structure */
+struct bna_rxq_entry { /* Rx-Buffer */
+ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
+};
+
+typedef u32 bna_cq_e_flag_t;
+
+/* CQ Entry Structure */
+struct bna_cq_entry {
+ bna_cq_e_flag_t flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+#endif /* __BNA_HW_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfi.h"
+
+/**
+ * IB
+ */
+#define bna_ib_find_free_ibidx(_mask, _pos)\
+do {\
+ (_pos) = 0;\
+ while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
+ ((1 << (_pos)) & (_mask)))\
+ (_pos)++;\
+} while (0)
+
+#define bna_ib_count_ibidx(_mask, _count)\
+do {\
+ int pos = 0;\
+ (_count) = 0;\
+ while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
+ if ((1 << pos) & (_mask))\
+ (_count) = pos + 1;\
+ pos++;\
+ } \
+} while (0)
+
+#define bna_ib_select_segpool(_count, _q_idx)\
+do {\
+ int i;\
+ (_q_idx) = -1;\
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
+ if ((_count <= ibidx_pool[i].pool_entry_size)) {\
+ (_q_idx) = i;\
+ break;\
+ } \
+ } \
+} while (0)
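+
+/*
+ * Example (illustrative): for _mask = 0x12 (bits 1 and 4 set),
+ * bna_ib_find_free_ibidx() yields 0 (the first clear bit) and
+ * bna_ib_count_ibidx() yields 5 -- the highest set bit position
+ * plus one, not the number of set bits.
+ */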
+
+struct bna_ibidx_pool {
+ int pool_size;
+ int pool_entry_size;
+};
+init_ibidx_pool(ibidx_pool);
+
+static struct bna_intr *
+bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_intr *intr;
+ struct list_head *qe;
+
+ list_for_each(qe, &ib_mod->intr_active_q) {
+ intr = (struct bna_intr *)qe;
+
+ if ((intr->intr_type == intr_type) &&
+ (intr->vector == vector)) {
+ intr->ref_count++;
+ return intr;
+ }
+ }
+
+ if (list_empty(&ib_mod->intr_free_q))
+ return NULL;
+
+ bfa_q_deq(&ib_mod->intr_free_q, &intr);
+ bfa_q_qe_init(&intr->qe);
+
+ intr->ref_count = 1;
+ intr->intr_type = intr_type;
+ intr->vector = vector;
+
+ list_add_tail(&intr->qe, &ib_mod->intr_active_q);
+
+ return intr;
+}
+
+static void
+bna_intr_put(struct bna_ib_mod *ib_mod,
+ struct bna_intr *intr)
+{
+ intr->ref_count--;
+
+ if (intr->ref_count == 0) {
+ intr->ib = NULL;
+ list_del(&intr->qe);
+ bfa_q_qe_init(&intr->qe);
+ list_add_tail(&intr->qe, &ib_mod->intr_free_q);
+ }
+}
+
+void
+bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ u8 offset;
+ struct bna_doorbell_qset *qset;
+ unsigned long off;
+
+ ib_mod->bna = bna;
+
+ ib_mod->ib = (struct bna_ib *)
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->intr = (struct bna_intr *)
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->idx_seg = (struct bna_ibidx_seg *)
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ib_mod->ib_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_active_q);
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
+ INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
+
+ for (i = 0; i < BFI_MAX_IB; i++) {
+ ib_mod->ib[i].ib_id = i;
+
+ ib_mod->ib[i].ib_seg_host_addr_kva =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ ib_mod->ib[i].ib_seg_host_addr.lsb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ ib_mod->ib[i].ib_seg_host_addr.msb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+
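+		/*
+		 * offsetof-style trick: index a NULL-based qset array to
+		 * compute the byte offset of this IB's doorbell; even IBs
+		 * use ib0 while odd IBs step 8 words further, into ib1.
+		 */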
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
+ * (0x20 >> 2)]);
+ ib_mod->ib[i].door_bell.doorbell_addr = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+
+ bfa_q_qe_init(&ib_mod->ib[i].qe);
+ list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
+
+ bfa_q_qe_init(&ib_mod->intr[i].qe);
+ list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
+ }
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ for (j = 0; j < ibidx_pool[i].pool_size; j++) {
+ bfa_q_qe_init(&ib_mod->idx_seg[count]);
+ ib_mod->idx_seg[count].ib_seg_size =
+ ibidx_pool[i].pool_entry_size;
+ ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
+ list_add_tail(&ib_mod->idx_seg[count].qe,
+ &ib_mod->ibidx_seg_pool[i]);
+ count++;
+ offset += ibidx_pool[i].pool_entry_size;
+ }
+ }
+}
+
+void
+bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
+{
+ int i;
+ int j;
+ struct list_head *qe;
+
+ i = 0;
+ list_for_each(qe, &ib_mod->ib_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &ib_mod->intr_free_q)
+ i++;
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
+ j++;
+ }
+
+ ib_mod->bna = NULL;
+}
+
+struct bna_ib *
+bna_ib_get(struct bna_ib_mod *ib_mod,
+ enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_ib *ib;
+ struct bna_intr *intr;
+
+ if (intr_type == BNA_INTR_T_INTX)
+ vector = (1 << vector);
+
+ intr = bna_intr_get(ib_mod, intr_type, vector);
+ if (intr == NULL)
+ return NULL;
+
+ if (intr->ib) {
+ if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+ intr->ib->ref_count++;
+ return intr->ib;
+ }
+
+ if (list_empty(&ib_mod->ib_free_q)) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+
+ bfa_q_deq(&ib_mod->ib_free_q, &ib);
+ bfa_q_qe_init(&ib->qe);
+
+ ib->ref_count = 1;
+ ib->start_count = 0;
+ ib->idx_mask = 0;
+
+ ib->intr = intr;
+ ib->idx_seg = NULL;
+ intr->ib = ib;
+
+ ib->bna = ib_mod->bna;
+
+ return ib;
+}
+
+void
+bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
+{
+ bna_intr_put(ib_mod, ib->intr);
+
+ ib->ref_count--;
+
+ if (ib->ref_count == 0) {
+ ib->intr = NULL;
+ ib->bna = NULL;
+ list_add_tail(&ib->qe, &ib_mod->ib_free_q);
+ }
+}
+
+/* Returns index offset - starting from 0 */
+int
+bna_ib_reserve_idx(struct bna_ib *ib)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int idx;
+ int num_idx;
+ int q_idx;
+
+ /* Find the first free index position */
+ bna_ib_find_free_ibidx(ib->idx_mask, idx);
+ if (idx == BFI_IBIDX_MAX_SEGSIZE)
+ return -1;
+
+ /*
+ * Calculate the total number of indexes held by this IB,
+ * including the index newly reserved above.
+ */
+ bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
+
+ /* See if there is a free space in the index segment held by this IB */
+ if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
+ ib->idx_mask |= (1 << idx);
+ return idx;
+ }
+
+ if (ib->start_count)
+ return -1;
+
+ /* Allocate a new segment */
+ bna_ib_select_segpool(num_idx, q_idx);
+ while (1) {
+ if (q_idx == BFI_IBIDX_TOTAL_POOLS)
+ return -1;
+ if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
+ break;
+ q_idx++;
+ }
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+
+ /* Free the old segment */
+ if (ib->idx_seg) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
+ list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
+ }
+
+ ib->idx_seg = idx_seg;
+
+ ib->idx_mask |= (1 << idx);
+
+ return idx;
+}
+
+void
+bna_ib_release_idx(struct bna_ib *ib, int idx)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int num_idx;
+ int cur_q_idx;
+ int new_q_idx;
+
+ ib->idx_mask &= ~(1 << idx);
+
+ if (ib->start_count)
+ return;
+
+ bna_ib_count_ibidx(ib->idx_mask, num_idx);
+
+ /*
+ * Free the segment, if there are no more indexes in the segment
+ * held by this IB
+ */
+ if (!num_idx) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = NULL;
+ return;
+ }
+
+ /* See if we can move to a smaller segment */
+ bna_ib_select_segpool(num_idx, new_q_idx);
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ while (new_q_idx < cur_q_idx) {
+ if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
+ break;
+ new_q_idx++;
+ }
+ if (new_q_idx < cur_q_idx) {
+ /* Select the new smaller segment */
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+ /* Free the old segment */
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = idx_seg;
+ }
+}
+
+int
+bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
+{
+ if (ib->start_count)
+ return -1;
+
+ ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
+ ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
+ ib->ib_config.interpkt_count = ib_config->interpkt_count;
+ ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
+
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
+ if (ib->intr->intr_type == BNA_INTR_T_MSIX)
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
+
+ return 0;
+}
+
+void
+bna_ib_start(struct bna_ib *ib)
+{
+ struct bna_ib_blk_mem ib_cfg;
+ struct bna_ib_blk_mem *ib_mem;
+ u32 pg_num;
+ u32 intx_mask;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ ib->start_count++;
+
+ if (ib->start_count > 1)
+ return;
+
+ ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
+ ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
+
+ ib_cfg.clsc_n_ctrl_n_msix = (((u32)
+ ib->ib_config.coalescing_timeo << 16) |
+ ((u32)ib->ib_config.ctrl_flags << 8) |
+ (ib->intr->vector));
+ ib_cfg.ipkt_n_ent_n_idxof =
+ ((u32)
+ (ib->ib_config.interpkt_timeo & 0xf) << 16) |
+ ((u32)ib->idx_seg->ib_seg_size << 8) |
+ (ib->idx_seg->ib_idx_tbl_offset);
+ ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
+ ib->ib_config.interpkt_count << 24);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_IB_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_IB_RAM_BASE_OFFSET);
+
+ ib_mem = (struct bna_ib_blk_mem *)0;
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
+ writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
+ writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
+ writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
+ writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
+ writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
+
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
+ off = (unsigned long)
+ ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
+ writel(0, base_addr + off);
+ }
+
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask &= ~(ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+}
+
+void
+bna_ib_stop(struct bna_ib *ib)
+{
+ u32 intx_mask;
+
+ ib->start_count--;
+
+ if (ib->start_count == 0) {
+ writel(BNA_DOORBELL_IB_INT_DISABLE,
+ ib->door_bell.doorbell_addr);
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask |= (ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+ }
+}
+
+void
+bna_ib_fail(struct bna_ib *ib)
+{
+ ib->start_count = 0;
+}
+
+/**
+ * RXF
+ */
+static void rxf_enable(struct bna_rxf *rxf);
+static void rxf_disable(struct bna_rxf *rxf);
+static void __rxf_config_set(struct bna_rxf *rxf);
+static void __rxf_rit_set(struct bna_rxf *rxf);
+static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
+static int rxf_process_packet_filter(struct bna_rxf *rxf);
+static int rxf_clear_packet_filter(struct bna_rxf *rxf);
+static void rxf_reset_packet_filter(struct bna_rxf *rxf);
+static void rxf_cb_enabled(void *arg, int status);
+static void rxf_cb_disabled(void *arg, int status);
+static void bna_rxf_cb_stats_cleared(void *arg, int status);
+static void __rxf_enable(struct bna_rxf *rxf);
+static void __rxf_disable(struct bna_rxf *rxf);
+
+bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+
+static struct bfa_sm_table rxf_sm_table[] = {
+ {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
+ {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
+ {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
+ {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
+ {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
+ {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
+ {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
+ {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
+ {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
+};
+
+static void
+bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
+{
+ call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
+}
+
+static void
+bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_START:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
+ break;
+
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_FAIL:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_STARTED:
+ case RXF_E_STOPPED:
+ case RXF_E_CAM_FLTR_RESP:
+ /**
+ * These events are received due to flushing of mbox
+ * when device fails
+ */
+ /* No-op */
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
+{
+ __rxf_config_set(rxf);
+ __rxf_rit_set(rxf);
+ rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+		 * STOP originates from bnad. When this happens,
+		 * the FSM cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ break;
+
+ case RXF_E_FAIL:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_STARTED:
+ /**
+ * Force rxf_process_filter() to go through initial
+ * config
+ */
+ if ((rxf->ucast_active_mac != NULL) &&
+ (rxf->ucast_pending_set == 0))
+ rxf->ucast_pending_set = 1;
+
+ if (rxf->rss_status == BNA_STATUS_T_ENABLED)
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
+{
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+		 * STOP originates from bnad. When this happens,
+		 * the FSM cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_started_entry(struct bna_rxf *rxf)
+{
+ call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
+
+ if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+ else
+ bfa_fsm_send_event(rxf, RXF_E_RESUME);
+	}
+}
+
+static void
+bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+		/* Hack to get the FSM to start clearing CAM entries */
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
+ break;
+
+ case RXF_E_RESUME:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * Note: Do not add rxf_clear_packet_filter here.
+	 * It will overstep the mbox when this transition happens:
+ * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_clear_packet_filter(rxf)) {
+ /* No more pending CAM entries to clear */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ rxf_disable(rxf);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * NOTE: Do not add rxf_disable here.
+	 * It will overstep the mbox when this transition happens:
+ * start_wait -> stop_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ /**
+		 * This event is received due to the abrupt transition from
+		 * the bna_rxf_sm_start_wait state on receiving the
+		 * RXF_E_STOP event
+ */
+ rxf_disable(rxf);
+ break;
+
+ case RXF_E_STOPPED:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &=
+ ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
+ __rxf_disable(rxf);
+}
+
+static void
+bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STOPPED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
+{
+ __bna_rxf_stat_clr(rxf);
+}
+
+static void
+bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ case RXF_E_STAT_CLEARED:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+__rxf_enable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 1;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_enabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_disable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 0;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_disabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_config_set(struct bna_rxf *rxf)
+{
+ u32 i;
+ struct bna_rss_mem *rss_mem;
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ struct bna *bna = rxf->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RSS_TABLE_BASE_OFFSET);
+
+ rss_mem = (struct bna_rss_mem *)0;
+
+ /* Configure RSS if required */
+ if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
+ /* configure RSS Table */
+ writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
+ bna->port_num, RSS_TABLE_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ /* temporarily disable RSS, while hash value is written */
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(0, base_addr + off);
+
+ for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
+ off = (unsigned long)
+ &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
+ writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
+ base_addr + off);
+ }
+
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
+ base_addr + off);
+ }
+
+ /* Configure RxF */
+ writel(BNA_GET_PAGE_NUM(
+ LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
+ RX_FNDB_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
+
+ /* We always use RSS table 0 */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
+ writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
+ base_addr + off);
+
+ /* small large buffer enable/disable */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
+ writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
+ base_addr + off);
+
+ /* RIT offset, HDS forced offset, multicast RxQ Id */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
+ writel((rxf->rit_segment->rit_offset << 16) |
+ (rxf->forced_offset << 8) |
+ (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
+ base_addr + off);
+
+ /*
+ * default vlan tag, default function enable, strip vlan bytes,
+ * HDS type, header size
+ */
+
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
+ writel(((u32)rxf->default_vlan_tag << 16) |
+ (rxf->ctrl_flags &
+ (BNA_RXF_CF_DEFAULT_VLAN |
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
+ BNA_RXF_CF_VLAN_STRIP)) |
+ (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
+ rxf->hds_cfg.header_size,
+ base_addr + off);
+}
+
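+/**
+ * Writes the per-function VLAN filter bitmap into VLAN RAM. When filtering
+ * is disabled, every bit is set so that all VLAN ids are accepted.
+ */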
+void
+__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna *bna = rxf->rx->bna;
+ int i;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ if (status == BNA_STATUS_T_ENABLED) {
+ /* enable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(rxf->vlan_filter_table[i],
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ } else {
+ /* disable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(0xffffffff,
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ }
+}
+
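+/**
+ * Writes this RxF's RIT segment - one (large RxQ id, small RxQ id) pair
+ * per entry - into the function-to-RxQ translation table.
+ */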
+static void
+__rxf_rit_set(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ struct bna_rit_mem *rit_mem;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ FUNCTION_TO_RXQ_TRANSLATE);
+
+ rit_mem = (struct bna_rit_mem *)0;
+
+ writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
+ FUNCTION_TO_RXQ_TRANSLATE),
+ bna->regs.page_addr);
+
+ for (i = 0; i < rxf->rit_segment->rit_size; i++) {
+ off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
+ writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
+ rxf->rit_segment->rit[i].small_rxq_id,
+ base_addr + off);
+ }
+}
+
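+/**
+ * Sends a STATS_CLEAR mailbox command to f/w for this RxF only; the txf and
+ * generic stats masks are left zero. Completion is reported through
+ * bna_rxf_cb_stats_cleared().
+ */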
+static void
+__bna_rxf_stat_clr(struct bna_rxf *rxf)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.txf_id_mask[0] = 0;
+ ll_req.txf_id_mask[1] = 0;
+
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rxf_cb_stats_cleared, rxf);
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+rxf_enable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+ else {
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+ }
+}
+
+static void
+rxf_cb_enabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+}
+
+static void
+rxf_disable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+	else {
+		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
+		__rxf_disable(rxf);
+	}
+}
+
+static void
+rxf_cb_disabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+}
+
+void
+rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+}
+
+static void
+bna_rxf_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
+}
+
+void
+rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr)
+{
+ struct bfi_ll_mac_addr_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
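+/**
+ * Processes at most one pending mcast add or delete per invocation and
+ * issues the corresponding CAM mailbox command. Returns 1 if a command was
+ * posted (caller must wait for the response), 0 if nothing was pending.
+ */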
+static int
+rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add multicast entries */
+ if (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ return 1;
+ }
+
+	/* Delete multicast entries previously added */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
+{
+ /* Apply the VLAN filter */
+ if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
+ !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ }
+
+ /* Apply RSS configuration */
+ if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
+ if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
+ /* RSS is being disabled */
+ rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ } else {
+ /* RSS is being enabled or reconfigured */
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Processes pending ucast/mcast entry addition/deletion and issues the
+ * mailbox command. Also processes pending filter configuration - promiscuous
+ * mode, default mode, allmulti mode - and issues a mailbox command or
+ * directly applies it to h/w
+ */
+static int
+rxf_process_packet_filter(struct bna_rxf *rxf)
+{
+ /* Set the default MAC first */
+ if (rxf->ucast_pending_set > 0) {
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
+ rxf->ucast_active_mac);
+ rxf->ucast_pending_set--;
+ return 1;
+ }
+
+ if (rxf_process_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_mcast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_allmulti(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_vlan(rxf))
+ return 1;
+
+ return 0;
+}
+
+static int
+rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 3. delete pending mcast entries */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ /* 4. clear active mcast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * In the rxf stop path, processes pending ucast/mcast delete queue and issues
+ * the mailbox command. Moves the active ucast/mcast entries to pending add q,
+ * so that they are added to CAM again in the rxf start path. Moves the current
+ * filter settings - promiscuous, default, allmulti - to pending filter
+ * configuration
+ */
+static int
+rxf_clear_packet_filter(struct bna_rxf *rxf)
+{
+ if (rxf_clear_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_mcast(rxf))
+ return 1;
+
+ /* 5. clear active default MAC in the CAM */
+ if (rxf->ucast_pending_set > 0)
+ rxf->ucast_pending_set = 0;
+
+ if (rxf_clear_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_allmulti(rxf))
+ return 1;
+
+ return 0;
+}
+
+static void
+rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 3. Move active mcast entries to pending_add_q */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_add_q);
+ }
+
+ /* 4. Throw away delete pending mcast entries */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+}
+
+/**
+ * In the rxf fail path, throws away the ucast/mcast entries pending
+ * deletion, and moves all active ucast/mcast entries to the pending add
+ * queue so that they are added back to CAM in the rxf start path. Also
+ * moves the current filter configuration to pending filter configuration.
+ */
+static void
+rxf_reset_packet_filter(struct bna_rxf *rxf)
+{
+ rxf_reset_packet_filter_ucast(rxf);
+
+ rxf_reset_packet_filter_mcast(rxf);
+
+ /* 5. Turn off ucast set flag */
+ rxf->ucast_pending_set = 0;
+
+ rxf_reset_packet_filter_promisc(rxf);
+
+ rxf_reset_packet_filter_default(rxf);
+
+ rxf_reset_packet_filter_allmulti(rxf);
+}
+
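+/**
+ * Initializes the RxF: ucast/mcast CAM queues, operational state (paused
+ * or running, from q_config), RIT segment allocation and the multicast
+ * RxQ id, then puts the FSM in the stopped state.
+ */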
+void
+bna_rxf_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ struct list_head *qe;
+ struct bna_rxp *rxp;
+
+ /* rxf_id is initialized during rx_mod init */
+ rxf->rx = rx;
+
+ INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
+ rxf->ucast_pending_set = 0;
+ INIT_LIST_HEAD(&rxf->ucast_active_q);
+ rxf->ucast_active_mac = NULL;
+
+ INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
+ INIT_LIST_HEAD(&rxf->mcast_active_q);
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
+ rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
+
+ rxf->rxf_oper_state = (q_config->paused) ?
+ BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
+
+ bna_rxf_adv_init(rxf, rx, q_config);
+
+ rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
+ q_config->num_paths);
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ if (q_config->rxp_type == BNA_RXP_SINGLE)
+ rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
+ else
+ rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
+ break;
+ }
+
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ memset(rxf->vlan_filter_table, 0,
+ (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+}
+
+void
+bna_rxf_uninit(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac;
+
+ bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
+ rxf->rit_segment = NULL;
+
+ rxf->ucast_pending_set = 0;
+
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+
+ if (rxf->ucast_active_mac) {
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
+ rxf->ucast_active_mac);
+ rxf->ucast_active_mac = NULL;
+ }
+
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ rxf->rx = NULL;
+}
+
+void
+bna_rxf_start(struct bna_rxf *rxf)
+{
+ rxf->start_cbfn = bna_rx_cb_rxf_started;
+ rxf->start_cbarg = rxf->rx;
+ rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_START);
+}
+
+void
+bna_rxf_stop(struct bna_rxf *rxf)
+{
+ rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+ rxf->stop_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_STOP);
+}
+
+void
+bna_rxf_fail(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags |= BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_FAIL);
+}
+
+int
+bna_rxf_state_get(struct bna_rxf *rxf)
+{
+ return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
+}
+
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->ucast_active_mac == NULL) {
+ rxf->ucast_active_mac =
+ bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (rxf->ucast_active_mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ }
+
+ memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
+ rxf->ucast_pending_set++;
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Check if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ /* Check if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ return BNA_CB_MCAST_LIST_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_del_q);
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ return BNA_CB_INVALID_MAC;
+}
+
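+/**
+ * Reconciles the current mcast configuration with a new list: entries in
+ * the new list that are neither active nor pending are scheduled for
+ * addition, while active entries absent from the new list are scheduled
+ * for deletion. A single RXF_E_CAM_FLTR_MOD event kicks off the h/w update,
+ * and only if something actually changed.
+ */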
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac;
+ struct bna_mac *mac1;
+ int skip;
+ int delete;
+ int need_hw_config = 0;
+ int i;
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
+ }
+
+ /* Schedule for addition */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ skip = 0;
+
+ /* Skip if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ /* Skip if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ need_hw_config = 1;
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Delete the entries that are in the pending_add_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete)
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ else
+ list_add_tail(&mac->qe, &list_head);
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Schedule entries for deletion that are in the active_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete) {
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ need_hw_config = 1;
+ } else {
+ list_add_tail(&mac->qe, &list_head);
+ }
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ return BNA_CB_MCAST_LIST_FULL;
+}
+
+void
+bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
+ rxf->vlan_filter_table[index] |= bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+void
+bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
+ rxf->vlan_filter_table[index] &= ~bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/**
+ * RX
+ */
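+/*
+ * RXQ_RCB_INIT() computes the RxQ doorbell address offsetof-style, by
+ * indexing a zero-based struct bna_doorbell_qset pointer and adding the
+ * resulting offset to the doorbell base address.
+ */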
+#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
+ struct bna_doorbell_qset *_qset; \
+ unsigned long off; \
+ (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
+ (q)->rcb->q_depth = (qdepth); \
+ (q)->rcb->unmap_q = unmapq_mem; \
+ (q)->rcb->rxq = (q); \
+ (q)->rcb->cq = &(rxp)->cq; \
+ (q)->rcb->bnad = (bna)->bnad; \
+ _qset = (struct bna_doorbell_qset *)0; \
+ off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
+ (q)->rcb->q_dbell = off + \
+ BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
+ (q)->rcb->id = _id; \
+} while (0)
+
+#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
+ (qcfg)->num_paths : ((qcfg)->num_paths * 2))
+
+#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
+ (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
+
+#define call_rx_stop_callback(rx, status) \
+do { \
+	if ((rx)->stop_cbfn) { \
+		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
+		(rx)->stop_cbfn = NULL; \
+		(rx)->stop_cbarg = NULL; \
+	} \
+} while (0)
+
+/*
+ * Since rx_enable is a synchronous callback, there is no start_cbfn required.
+ * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
+ * for each rxpath.
+ */
+
+#define call_rx_disable_cbfn(rx, status) \
+do { \
+	if ((rx)->disable_cbfn) { \
+		(*(rx)->disable_cbfn)((rx)->disable_cbarg, \
+			status); \
+		(rx)->disable_cbfn = NULL; \
+		(rx)->disable_cbarg = NULL; \
+	} \
+} while (0)
+
+#define rxqs_reqd(type, num_rxqs) \
+ (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
+
+#define rx_ib_fail(rx) \
+do { \
+ struct bna_rxp *rxp; \
+ struct list_head *qe; \
+ list_for_each(qe, &(rx)->rxp_q) { \
+ rxp = (struct bna_rxp *)qe; \
+ bna_ib_fail(rxp->cq.ib); \
+ } \
+} while (0)
+
+static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
+static void __bna_rxq_start(struct bna_rxq *rxq);
+static void __bna_cq_start(struct bna_cq *cq);
+static void bna_rit_create(struct bna_rx *rx);
+static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
+static void bna_rx_cb_rxq_stopped_all(void *arg);
+
+bfa_fsm_state_decl(bna_rx, stopped,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, started,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+
+static struct bfa_sm_table rx_sm_table[] = {
+ {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
+ {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
+ {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
+ {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
+ {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
+};
+
+static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ }
+
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+}
+
+static void bna_rx_sm_stopped(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
+ break;
+ case RX_E_STOP:
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+ break;
+ case RX_E_FAIL:
+ /* no-op */
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+
+}
+
+static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+
+ /* Setup the RIT */
+ bna_rit_create(rx);
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_start(rxp->cq.ib);
+ GET_RXQS(rxp, q0, q1);
+ q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
+ __bna_rxq_start(q0);
+ rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
+ if (q1) {
+ __bna_rxq_start(q1);
+ rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
+ }
+ __bna_cq_start(&rxp->cq);
+ }
+
+ bna_rxf_start(&rx->rxf);
+}
+
+static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_RXF_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_started);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_started_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ /* Start IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_ack(&rxp->cq.ib->door_bell, 0);
+ }
+
+ bna_llport_admin_up(&rx->bna->port.llport);
+}
+
+void
+bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_STOP:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
+{
+ bna_rxf_stop(&rx->rxf);
+}
+
+void
+bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_RXF_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
+ break;
+ case RX_E_RXF_STARTED:
+ /**
+ * RxF was in the process of starting up when
+ * RXF_E_STOP was issued. Ignore this event
+ */
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+
+}
+
+void
+bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct list_head *qe;
+ u32 rxq_mask[2] = {0, 0};
+
+ /* Only one call to multi-rxq-stop for all RXPs in this RX */
+ bfa_wc_up(&rx->rxq_stop_wc);
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ GET_RXQS(rxp, q0, q1);
+ if (q0->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q0->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
+ if (q1) {
+ if (q1->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q1->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)
+ 1 << (q1->rxq_id - 32));
+ }
+ }
+
+ __bna_multi_rxq_stop(rxp, rxq_mask);
+}
+
+void
+bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ struct bna_rxp *rxp = NULL;
+ struct list_head *qe;
+
+ switch (event) {
+ case RX_E_RXQ_STOPPED:
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ bna_ib_stop(rxp->cq.ib);
+ }
+ /* Fall through */
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
+{
+ struct bfi_ll_q_stop_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
+ ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
+ ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
+ bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rx_cb_multi_rxq_stopped, rxp);
+ bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
+}
+
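+/**
+ * Writes the RxQ configuration - page table and current queue entry
+ * addresses, page count/size, CQ binding, buffer size and initial IDLE
+ * queue state - into HQM queue RAM, then resets the RCB indices.
+ */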
+void
+__bna_rxq_start(struct bna_rxq *rxq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_rxq_mem rxq_cfg, *rxq_mem;
+ struct bna_dma_addr cur_q_addr;
+ /* struct bna_doorbell_qset *qset; */
+ struct bna_qpt *qpt;
+ u32 pg_num;
+ struct bna *bna = rxq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &rxq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
+ rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
+ (qpt->page_size >> 2);
+ rxq_cfg.sg_n_cq_n_cns_ptr =
+ ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
+ rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
+ BNA_Q_IDLE_STATE;
+ rxq_cfg.next_qid = 0x0 | (0x3 << 8);
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, bna->regs.page_addr);
+
+ /* Write to h/w */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ rxq_mem = &q_mem[rxq->rxq_id].rxq;
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
+ writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
+ writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_lo;
+ writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_hi;
+ writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
+ writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->entry_n_pg_size;
+ writel(rxq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
+ writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
+ writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->next_qid;
+ writel(rxq_cfg.next_qid, base_addr + off);
+
+ rxq->rcb->producer_index = 0;
+ rxq->rcb->consumer_index = 0;
+}
+
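+/**
+ * Writes the CQ configuration - page table and current queue entry
+ * addresses, page count/size, IB binding and initial IDLE queue state -
+ * into HQM CQ RAM, then resets the CCB producer indices.
+ */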
+void
+__bna_cq_start(struct bna_cq *cq)
+{
+ struct bna_cq_mem cq_cfg, *cq_mem;
+ const struct bna_qpt *qpt;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ struct bna *bna = cq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &cq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+ cq_cfg.entry_n_pg_size =
+ ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
+ ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
+ cq_cfg.q_state = BNA_Q_IDLE_STATE;
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ writel(pg_num, bna->regs.page_addr);
+
+ /* H/W write */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ cq_mem = (struct bna_cq_mem *)0;
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
+ writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
+ writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
+ writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
+ writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
+ writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
+ writel(cq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
+ writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].q_state;
+ writel(cq_cfg.q_state, base_addr + off);
+
+ cq->ccb->producer_index = 0;
+ *(cq->ccb->hw_producer_index) = 0;
+}
+
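+/**
+ * Fills this RX's RIT segment with one entry per rx-path, mapping to the
+ * path's large RxQ id and (if present) small RxQ id.
+ */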
+void
+bna_rit_create(struct bna_rx *rx)
+{
+ struct list_head *qe_rxp;
+ struct bna *bna;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ int offset;
+
+ bna = rx->bna;
+
+ offset = 0;
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ GET_RXQS(rxp, q0, q1);
+ rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
+ rx->rxf.rit_segment->rit[offset].small_rxq_id =
+ (q1 ? q1->rxq_id : 0);
+ offset++;
+ }
+}
+
+int
+_rx_can_satisfy(struct bna_rx_mod *rx_mod,
+ struct bna_rx_config *rx_cfg)
+{
+ if ((rx_mod->rx_free_count == 0) ||
+ (rx_mod->rxp_free_count == 0) ||
+ (rx_mod->rxq_free_count == 0))
+ return 0;
+
+ if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < rx_cfg->num_paths))
+ return 0;
+ } else {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
+ return 0;
+ }
+
+ if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
+ return 0;
+
+ return 1;
+}
+
+struct bna_rxq *
+_get_free_rxq(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rxq *rxq = NULL;
+ struct list_head *qe = NULL;
+
+ bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ if (qe) {
+ rx_mod->rxq_free_count--;
+ rxq = (struct bna_rxq *)qe;
+ }
+ return rxq;
+}
+
+void
+_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+{
+ bfa_q_qe_init(&rxq->qe);
+ list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+}
+
+struct bna_rxp *
+_get_free_rxp(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rxp *rxp = NULL;
+
+ bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ if (qe) {
+ rx_mod->rxp_free_count--;
+
+ rxp = (struct bna_rxp *)qe;
+ }
+
+ return rxp;
+}
+
+void
+_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+{
+ bfa_q_qe_init(&rxp->qe);
+ list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+}
+
+struct bna_rx *
+_get_free_rx(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rx *rx = NULL;
+
+ bfa_q_deq(&rx_mod->rx_free_q, &qe);
+ if (qe) {
+ rx_mod->rx_free_count--;
+
+ rx = (struct bna_rx *)qe;
+ bfa_q_qe_init(qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ }
+
+ return rx;
+}
+
+void
+_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+{
+ bfa_q_qe_init(&rx->qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+}
+
+void
+_rx_init(struct bna_rx *rx, struct bna *bna)
+{
+ rx->bna = bna;
+ rx->rx_flags = 0;
+
+ INIT_LIST_HEAD(&rx->rxp_q);
+
+ rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
+ rx->rxq_stop_wc.wc_cbarg = rx;
+ rx->rxq_stop_wc.wc_count = 0;
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+void
+_rxp_add_rxqs(struct bna_rxp *rxp,
+ struct bna_rxq *q0,
+ struct bna_rxq *q1)
+{
+ switch (rxp->type) {
+ case BNA_RXP_SINGLE:
+ rxp->rxq.single.only = q0;
+ rxp->rxq.single.reserved = NULL;
+ break;
+ case BNA_RXP_SLR:
+ rxp->rxq.slr.large = q0;
+ rxp->rxq.slr.small = q1;
+ break;
+ case BNA_RXP_HDS:
+ rxp->rxq.hds.data = q0;
+ rxp->rxq.hds.hdr = q1;
+ break;
+ default:
+ break;
+ }
+}
+
+void
+_rxq_qpt_init(struct bna_rxq *rxq,
+ struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxq->qpt.page_count = page_count;
+ rxq->qpt.page_size = page_size;
+
+ rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxq->qpt.page_count; i++) {
+ rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+void
+_rxp_cqpt_setup(struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxp->cq.qpt.page_count = page_count;
+ rxp->cq.qpt.page_size = page_size;
+
+ rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxp->cq.qpt.page_count; i++) {
+ rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+void
+_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
+{
+ list_add_tail(&rxp->qe, &rx->rxp_q);
+}
+
+void
+_init_rxmod_queues(struct bna_rx_mod *rx_mod)
+{
+ INIT_LIST_HEAD(&rx_mod->rx_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxq_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxp_free_q);
+ INIT_LIST_HEAD(&rx_mod->rx_active_q);
+
+ rx_mod->rx_free_count = 0;
+ rx_mod->rxq_free_count = 0;
+ rx_mod->rxp_free_count = 0;
+}
+
+void
+_rx_ctor(struct bna_rx *rx, int id)
+{
+ bfa_q_qe_init(&rx->qe);
+ INIT_LIST_HEAD(&rx->rxp_q);
+ rx->bna = NULL;
+
+ rx->rxf.rxf_id = id;
+
+ /* FIXME: mbox_qe ctor()?? */
+ bfa_q_qe_init(&rx->mbox_qe.qe);
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+void
+bna_rx_cb_multi_rxq_stopped(void *arg, int status)
+{
+ struct bna_rxp *rxp = (struct bna_rxp *)arg;
+
+ bfa_wc_down(&rxp->rx->rxq_stop_wc);
+}
+
+void
+bna_rx_cb_rxq_stopped_all(void *arg)
+{
+ struct bna_rx *rx = (struct bna_rx *)arg;
+
+ bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
+}
+
+void
+bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ bfa_wc_down(&rx_mod->rx_stop_wc);
+}
+
+void
+bna_rx_mod_cb_rx_stopped_all(void *arg)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ if (rx_mod->stop_cbfn)
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_rx_start(struct bna_rx *rx)
+{
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ if (rx->rx_flags & BNA_RX_F_ENABLE)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_stop(struct bna_rx *rx)
+{
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
+ else {
+ rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+ rx->stop_cbarg = &rx->bna->rx_mod;
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+void
+bna_rx_fail(struct bna_rx *rx)
+{
+ /* Indicate port is not enabled, and failed */
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ rx->rx_flags |= BNA_RX_F_PORT_FAILED;
+ bfa_fsm_send_event(rx, RX_E_FAIL);
+}
+
+void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+ else
+ rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+ 1 << (rx->rxf.rxf_id - 32));
+}
+
+void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
+ else
+ rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
+ 1 << (rx->rxf.rxf_id - 32);
+}
+
+void
+bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
+ if (type == BNA_RX_T_LOOPBACK)
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_start(rx);
+ }
+}
+
+void
+bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
+
+ /**
+ * Before calling bna_rx_stop(), increment rx_stop_wc as many times
+ * as we are going to call bna_rx_stop
+ */
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bfa_wc_up(&rx_mod->rx_stop_wc);
+ }
+
+ if (rx_mod->rx_stop_wc.wc_count == 0) {
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_stop(rx);
+ }
+}
+
+void
+bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ bna_rx_fail(rx);
+ }
+}
+
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int index;
+ struct bna_rx *rx_ptr;
+ struct bna_rxp *rxp_ptr;
+ struct bna_rxq *rxq_ptr;
+
+ rx_mod->bna = bna;
+ rx_mod->flags = 0;
+
+ rx_mod->rx = (struct bna_rx *)
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxp = (struct bna_rxp *)
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxq = (struct bna_rxq *)
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ /* Initialize the queues */
+ _init_rxmod_queues(rx_mod);
+
+ /* Build RX queues */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rx_ptr = &rx_mod->rx[index];
+ _rx_ctor(rx_ptr, index);
+ list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+ }
+
+ /* build RX-path queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxp_ptr = &rx_mod->rxp[index];
+ rxp_ptr->cq.cq_id = index;
+ bfa_q_qe_init(&rxp_ptr->qe);
+ list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+ }
+
+ /* build RXQ queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxq_ptr = &rx_mod->rxq[index];
+ rxq_ptr->rxq_id = index;
+
+ bfa_q_qe_init(&rxq_ptr->qe);
+ list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+ }
+
+ rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
+ rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
+ rx_mod->rx_stop_wc.wc_count = 0;
+}
+
+void
+bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe;
+ int i;
+
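+	/* The free queues are only walked here; the counts are unused */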
+ i = 0;
+ list_for_each(qe, &rx_mod->rx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxp_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxq_free_q)
+ i++;
+
+ rx_mod->bna = NULL;
+}
+
+int
+bna_rx_state_get(struct bna_rx *rx)
+{
+ return bfa_sm_to_state(rx_sm_table, rx->fsm);
+}
+
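+/**
+ * Computes the memory and interrupt resources needed for the given RX
+ * configuration. Queue depths are rounded up to a power of 2 and queue
+ * sizes to a multiple of PAGE_SIZE before page counts are derived.
+ */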
+void
+bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
+{
+ u32 cq_size, hq_size, dq_size;
+ u32 cpage_count, hpage_count, dpage_count;
+ struct bna_mem_info *mem_info;
+ u32 cq_depth;
+ u32 hq_depth;
+ u32 dq_depth;
+
+ dq_depth = q_cfg->q_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ cq_depth = dq_depth + hq_depth;
+
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_size = cq_depth * BFI_CQ_WI_SIZE;
+ cq_size = ALIGN(cq_size, PAGE_SIZE);
+ cpage_count = SIZE_TO_PAGES(cq_size);
+
+ BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_size = dq_depth * BFI_RXQ_WI_SIZE;
+ dq_size = ALIGN(dq_size, PAGE_SIZE);
+ dpage_count = SIZE_TO_PAGES(dq_size);
+
+ if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
+ BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_size = hq_depth * BFI_RXQ_WI_SIZE;
+ hq_size = ALIGN(hq_size, PAGE_SIZE);
+ hpage_count = SIZE_TO_PAGES(hq_size);
+ } else {
+ hpage_count = 0;
+ }
+
+ /* CCB structures */
+ res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_ccb);
+ mem_info->num = q_cfg->num_paths;
+
+ /* RCB structures */
+ res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_rcb);
+ mem_info->num = BNA_GET_RXQS(q_cfg);
+
+ /* Completion QPT */
+ res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion s/w QPT */
+ res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = cpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion QPT pages */
+ res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = cpage_count * q_cfg->num_paths;
+
+ /* Data QPTs */
+ res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = dpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data QPT pages */
+ res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = dpage_count * q_cfg->num_paths;
+
+ /* Hdr QPTs */
+ res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = hpage_count * sizeof(void *);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr QPT pages */
+ res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = (hpage_count ? PAGE_SIZE : 0);
+ mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+
+ /* RX Interrupts */
+ res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
+}
+
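+/**
+ * Creates an RX object: carves rx/rxp/rxq structures out of the free pools,
+ * binds an IB (MSI-X vector) to each rx-path, initializes RCBs, RxQ/CQ page
+ * tables and the CCB, then initializes the RxF and leaves the FSM stopped.
+ * Returns NULL if the free pools cannot satisfy the configuration.
+ */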
+struct bna_rx *
+bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info,
+ void *priv)
+{
+ struct bna_rx_mod *rx_mod = &bna->rx_mod;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0;
+ struct bna_rxq *q1;
+ struct bna_intr_info *intr_info;
+ u32 page_count;
+ struct bna_mem_descr *ccb_mem;
+ struct bna_mem_descr *rcb_mem;
+ struct bna_mem_descr *unmapq_mem;
+ struct bna_mem_descr *cqpt_mem;
+ struct bna_mem_descr *cswqpt_mem;
+ struct bna_mem_descr *cpage_mem;
+ struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
+ struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
+ struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
+ struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
+ struct bna_mem_descr *hpage_mem; /* hdr page mem */
+ struct bna_mem_descr *dpage_mem; /* data page mem */
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
+ int dpage_count, hpage_count, rcb_idx;
+ struct bna_ib_config ibcfg;
+ /* Fail if we don't have enough RXPs, RXQs */
+ if (!_rx_can_satisfy(rx_mod, rx_cfg))
+ return NULL;
+
+ /* Initialize resource pointers */
+ intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
+ rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
+ unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
+ cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
+ cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
+ hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
+ dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
+ hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
+ dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
+ hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
+ dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
+
+ /* Compute q depth & page count */
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+ /* Get RX pointer */
+ rx = _get_free_rx(rx_mod);
+ _rx_init(rx, bna);
+ rx->priv = priv;
+ rx->type = rx_cfg->rx_type;
+
+ rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
+ rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
+ rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
+ rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
+ rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
+
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
+ switch (rx->type) {
+ case BNA_RX_T_REGULAR:
+ if (!(rx->bna->rx_mod.flags &
+ BNA_RX_MOD_F_PORT_LOOPBACK))
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ case BNA_RX_T_LOOPBACK:
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ }
+ }
+
+ for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
+ rxp = _get_free_rxp(rx_mod);
+ rxp->type = rx_cfg->rxp_type;
+ rxp->rx = rx;
+ rxp->cq.rx = rx;
+
+ /* Get required RXQs, and queue them to rx-path */
+ q0 = _get_free_rxq(rx_mod);
+ if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
+ q1 = NULL;
+ else
+ q1 = _get_free_rxq(rx_mod);
+
+ /* Initialize IB */
+ if (1 == intr_info->num) {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[0].vector);
+ rxp->vector = intr_info->idl[0].vector;
+ } else {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ /* Map the MSI-x vector used for this RXP */
+ rxp->vector = intr_info->idl[i].vector;
+ }
+
+ rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
+
+ ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+ ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
+ ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
+ ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
+
+ ret = bna_ib_config(rxp->cq.ib, &ibcfg);
+
+ /* Link rxqs to rxp */
+ _rxp_add_rxqs(rxp, q0, q1);
+
+ /* Link rxp to rx */
+ _rx_add_rxp(rx, rxp);
+
+ q0->rx = rx;
+ q0->rxp = rxp;
+
+ /* Initialize RCB for the large / data q */
+ q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ (q0)->rx_packets = (q0)->rx_bytes = 0;
+ (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
+
+ /* Initialize RXQs */
+ _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
+ q0->rcb->page_idx = dpage_idx;
+ q0->rcb->page_count = dpage_count;
+ dpage_idx += dpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q0->rcb);
+
+ if (q1) {
+ q1->rx = rx;
+ q1->rxp = rxp;
+
+ q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ (q1)->buffer_size = (rx_cfg)->small_buff_size;
+ (q1)->rx_packets = (q1)->rx_bytes = 0;
+ (q1)->rx_packets_with_error =
+ (q1)->rxbuf_alloc_failed = 0;
+
+ _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
+ &hqpt_mem[i], &hsqpt_mem[i],
+ &hpage_mem[hpage_idx]);
+ q1->rcb->page_idx = hpage_idx;
+ q1->rcb->page_count = hpage_count;
+ hpage_idx += hpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q1->rcb);
+ }
+ /* Setup RXP::CQ */
+ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
+ _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
+ rxp->cq.ccb->page_idx = cpage_idx;
+ rxp->cq.ccb->page_count = page_count;
+ cpage_idx += page_count;
+
+ rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
+ rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
+
+ rxp->cq.ccb->producer_index = 0;
+ rxp->cq.ccb->q_depth = rx_cfg->q_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q_depth);
+ rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
+ rxp->cq.ccb->rcb[0] = q0->rcb;
+ if (q1)
+ rxp->cq.ccb->rcb[1] = q1->rcb;
+ rxp->cq.ccb->cq = &rxp->cq;
+ rxp->cq.ccb->bnad = bna->bnad;
+ rxp->cq.ccb->hw_producer_index =
+ ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
+ (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
+ *(rxp->cq.ccb->hw_producer_index) = 0;
+ rxp->cq.ccb->intr_type = intr_info->intr_type;
+ rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ rxp->cq.ccb->rx_coalescing_timeo =
+ rxp->cq.ib->ib_config.coalescing_timeo;
+ rxp->cq.ccb->id = i;
+
+ /* Call bnad to complete CCB setup */
+ if (rx->ccb_setup_cbfn)
+ rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
+
+ } /* for each rx-path */
+
+ bna_rxf_init(&rx->rxf, rx, rx_cfg);
+
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+
+ return rx;
+}
+
+void
+bna_rx_destroy(struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
+ struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ bna_rxf_uninit(&rx->rxf);
+
+ while (!list_empty(&rx->rxp_q)) {
+ bfa_q_deq(&rx->rxp_q, &rxp);
+ GET_RXQS(rxp, q0, q1);
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
+ q0->rcb = NULL;
+ q0->rxp = NULL;
+ q0->rx = NULL;
+ _put_free_rxq(rx_mod, q0);
+ if (q1) {
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
+ q1->rcb = NULL;
+ q1->rxp = NULL;
+ q1->rx = NULL;
+ _put_free_rxq(rx_mod, q1);
+ }
+ rxp->rxq.slr.large = NULL;
+ rxp->rxq.slr.small = NULL;
+ if (rxp->cq.ib) {
+ if (rxp->cq.ib_seg_offset != 0xff)
+ bna_ib_release_idx(rxp->cq.ib,
+ rxp->cq.ib_seg_offset);
+ bna_ib_put(ib_mod, rxp->cq.ib);
+ rxp->cq.ib = NULL;
+ }
+ /* Callback to bnad for destroying CCB */
+ if (rx->ccb_destroy_cbfn)
+ rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ rxp->cq.ccb = NULL;
+ rxp->rx = NULL;
+ _put_free_rxp(rx_mod, rxp);
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ if (qe == &rx->qe) {
+ list_del(&rx->qe);
+ bfa_q_qe_init(&rx->qe);
+ break;
+ }
+ }
+
+ rx->bna = NULL;
+ rx->priv = NULL;
+ _put_free_rx(rx_mod, rx);
+}
+
+void
+bna_rx_enable(struct bna_rx *rx)
+{
+ if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ return;
+
+ rx->rx_flags |= BNA_RX_F_ENABLE;
+ if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+		/* h/w should not be accessed. Treat as if we're stopped */
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ } else {
+ rx->stop_cbfn = cbfn;
+ rx->stop_cbarg = rx->bna->bnad;
+
+ rx->rx_flags &= ~BNA_RX_F_ENABLE;
+
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+/**
+ * TX
+ */
+#define call_tx_stop_cbfn(tx, status)\
+do {\
+ if ((tx)->stop_cbfn)\
+ (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
+ (tx)->stop_cbfn = NULL;\
+ (tx)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_tx_prio_change_cbfn(tx, status)\
+do {\
+ if ((tx)->prio_change_cbfn)\
+ (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
+ (tx)->prio_change_cbfn = NULL;\
+} while (0)
+
+static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
+ enum bna_cb_status status);
+static void bna_tx_cb_txq_stopped(void *arg, int status);
+static void bna_tx_cb_stats_cleared(void *arg, int status);
+static void __bna_tx_stop(struct bna_tx *tx);
+static void __bna_tx_start(struct bna_tx *tx);
+static void __bna_txf_stat_clr(struct bna_tx *tx);
+
+enum bna_tx_event {
+ TX_E_START = 1,
+ TX_E_STOP = 2,
+ TX_E_FAIL = 3,
+ TX_E_TXQ_STOPPED = 4,
+ TX_E_PRIO_CHANGE = 5,
+ TX_E_STAT_CLEARED = 6,
+};
+
+enum bna_tx_state {
+ BNA_TX_STOPPED = 1,
+ BNA_TX_STARTED = 2,
+ BNA_TX_TXQ_STOP_WAIT = 3,
+ BNA_TX_PRIO_STOP_WAIT = 4,
+ BNA_TX_STAT_CLR_WAIT = 5,
+};
+
+bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
+ enum bna_tx_event);
+
+static struct bfa_sm_table tx_sm_table[] = {
+ {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
+ {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
+ {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
+};
+
+static void
+bna_tx_sm_stopped_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
+}
+
+static void
+bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ /**
+ * This event is received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_started_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ __bna_tx_start(tx);
+
+ /* Start IB */
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_ack(&txq->ib->door_bell, 0);
+ }
+}
+
+static void
+bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ __bna_tx_stop(tx);
+ break;
+
+ case TX_E_FAIL:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_fail(txq->ib);
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
+{
+ __bna_tx_stop(tx);
+}
+
+static void
+bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
+{
+ __bna_txf_stat_clr(tx);
+}
+
+static void
+bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_STAT_CLEARED:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
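+/*
+ * Program the TxQ context (queue page table, current queue entry,
+ * IB binding, priority and WRR quota) into the HQM queue RAM and
+ * reset the TCB indices.
+ */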
+static void
+__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_txq_mem txq_cfg;
+ struct bna_txq_mem *txq_mem;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* Fill out structure, to be subsequently written to hardware */
+ txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
+ txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
+ cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
+ txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
+
+ txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
+ (txq->qpt.page_size >> 2);
+ txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
+ ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
+
+ txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
+ txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
+ (txq->priority & 0x3));
+ txq_cfg.wvc_n_cquota_n_rquota =
+ ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
+ (BFI_TX_MAX_WRR_QUOTA & 0xfff));
+
+ /* Setup the page and write to H/W */
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
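+	/* Use a zero-based template pointer to compute this TxQ's
+	 * offset within the queue RAM window.
+	 */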
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ txq_mem = &q_mem[txq->txq_id].txq;
+
+	/*
+	 * The following four writes are a workaround because the H/W
+	 * needs to read these DMA addresses as little endian
+	 */
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
+ writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
+ writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_lo;
+ writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_hi;
+ writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
+ writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->entry_n_pg_size;
+ writel(txq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
+ writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
+ writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
+
+ off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
+ writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
+
+ off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
+ writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
+ *(txq->tcb->hw_consumer_index) = 0;
+
+}
+
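+/*
+ * Ask the firmware to stop a TxQ by posting a BFI_LL_H2I_TXQ_STOP_REQ
+ * mailbox command with the queue's bit set in the id mask.
+ */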
+static void
+__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bfi_ll_q_stop_req ll_req;
+ u32 bit_mask[2] = {0, 0};
+ if (txq->txq_id < 32)
+ bit_mask[0] = (u32)1 << txq->txq_id;
+ else
+ bit_mask[1] = (u32)1 << (txq->txq_id - 32);
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+ ll_req.q_id_mask[0] = htonl(bit_mask[0]);
+ ll_req.q_id_mask[1] = htonl(bit_mask[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_txq_stopped, tx);
+
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
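+/*
+ * Enable the Tx function: write its VLAN id and control flags into
+ * the Tx function database RAM and mark it in the Tx module's TxF
+ * bitmap.
+ */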
+static void
+__bna_txf_start(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
+ tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
+ base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
+ else
+ tx->bna->tx_mod.txf_bmap[1] |= ((u32)
+ 1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stop(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ u32 page_num;
+ u32 ctl_flags;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* retrieve the running txf_flags & turn off enable bit */
+ page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
+ writel(page_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ ctl_flags = readl(base_addr + off);
+ ctl_flags &= ~BFI_TXF_CF_ENABLE;
+
+ writel(ctl_flags, base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
+ else
+		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
+			1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stat_clr(struct bna_tx *tx)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 txf_bmap[2] = {0, 0};
+ if (tx->txf.txf_id < 32)
+ txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
+ else
+ txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.rxf_id_mask[0] = 0;
+ ll_req.rxf_id_mask[1] = 0;
+ ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_stats_cleared, tx);
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
+static void
+__bna_tx_start(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_start(txq->ib);
+ __bna_txq_start(tx, txq);
+ }
+
+ __bna_txf_start(tx);
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb->priority = txq->priority;
+ (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+}
+
+static void
+__bna_tx_stop(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ __bna_txf_stop(tx);
+
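+	/*
+	 * Raise the counter once per TxQ before posting any stop
+	 * request, so that the all-queues-stopped callback cannot run
+	 * until every stop posted below has been acknowledged.
+	 */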
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bfa_wc_up(&tx->txq_stop_wc);
+ }
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ __bna_txq_stop(tx, txq);
+ }
+}
+
+static void
+bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ txq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ txq->qpt.page_count = page_count;
+ txq->qpt.page_size = page_size;
+
+ txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < page_count; i++) {
+ txq->tcb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+
+ }
+}
+
+static void
+bna_tx_free(struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
+ struct bna_txq *txq;
+ struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
+ struct list_head *qe;
+
+ while (!list_empty(&tx->txq_q)) {
+ bfa_q_deq(&tx->txq_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ if (txq->ib) {
+ if (txq->ib_seg_offset != -1)
+ bna_ib_release_idx(txq->ib,
+ txq->ib_seg_offset);
+ bna_ib_put(ib_mod, txq->ib);
+ txq->ib = NULL;
+ }
+ txq->tcb = NULL;
+ txq->tx = NULL;
+ list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ if (qe == &tx->qe) {
+ list_del(&tx->qe);
+ bfa_q_qe_init(&tx->qe);
+ break;
+ }
+ }
+
+ tx->bna = NULL;
+ tx->priv = NULL;
+ list_add_tail(&tx->qe, &tx_mod->tx_free_q);
+}
+
+static void
+bna_tx_cb_txq_stopped(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+ bfa_wc_down(&tx->txq_stop_wc);
+}
+
+static void
+bna_tx_cb_txq_stopped_all(void *arg)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
+}
+
+static void
+bna_tx_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
+}
+
+static void
+bna_tx_start(struct bna_tx *tx)
+{
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ if (tx->flags & BNA_TX_F_ENABLED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+static void
+bna_tx_stop(struct bna_tx *tx)
+{
+ tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
+ tx->stop_cbarg = &tx->bna->tx_mod;
+
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+static void
+bna_tx_fail(struct bna_tx *tx)
+{
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_FAIL);
+}
+
+void
+bna_tx_prio_changed(struct bna_tx *tx, int prio)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = prio;
+ }
+
+ bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
+}
+
+static void
+bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
+{
+ if (cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+ else
+ tx->flags &= ~BNA_TX_F_PRIO_LOCK;
+}
+
+static void
+bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ bfa_wc_down(&tx_mod->tx_stop_wc);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped_all(void *arg)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ if (tx_mod->stop_cbfn)
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+}
+
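+/*
+ * Fill out the memory and interrupt resource requirements (TCB, QPT,
+ * s/w QPT and queue pages per TxQ, plus MSI-X completion vectors)
+ * that bnad must satisfy before calling bna_tx_create().
+ */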
+void
+bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
+{
+ u32 q_size;
+ u32 page_count;
+ struct bna_mem_info *mem_info;
+
+ res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_tcb);
+ mem_info->num = num_txq;
+
+ q_size = txq_depth * BFI_TXQ_WI_SIZE;
+ q_size = ALIGN(q_size, PAGE_SIZE);
+ page_count = q_size >> PAGE_SHIFT;
+
+ res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = page_count * sizeof(struct bna_dma_addr);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = page_count * sizeof(void *);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = num_txq * page_count;
+
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
+}
+
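+/*
+ * Allocate a Tx object and its TxQs from the free lists, attach and
+ * configure an IB per TxQ, set up the TCBs and queue page tables from
+ * the pre-allocated resources in res_info, and leave the Tx in the
+ * stopped state. Returns NULL if resources cannot be obtained.
+ */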
+struct bna_tx *
+bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv)
+{
+ struct bna_intr_info *intr_info;
+ struct bna_tx_mod *tx_mod = &bna->tx_mod;
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct list_head *qe;
+ struct bna_ib_mod *ib_mod = &bna->ib_mod;
+ struct bna_doorbell_qset *qset;
+ struct bna_ib_config ib_config;
+ int page_count;
+ int page_size;
+ int page_idx;
+ int i;
+ unsigned long off;
+
+ intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
+ tx_cfg->num_txq;
+ page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+
+ /**
+ * Get resources
+ */
+
+ if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
+ return NULL;
+
+ /* Tx */
+
+ if (list_empty(&tx_mod->tx_free_q))
+ return NULL;
+ bfa_q_deq(&tx_mod->tx_free_q, &tx);
+ bfa_q_qe_init(&tx->qe);
+
+ /* TxQs */
+
+ INIT_LIST_HEAD(&tx->txq_q);
+ for (i = 0; i < tx_cfg->num_txq; i++) {
+ if (list_empty(&tx_mod->txq_free_q))
+ goto err_return;
+
+ bfa_q_deq(&tx_mod->txq_free_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ list_add_tail(&txq->qe, &tx->txq_q);
+ txq->ib = NULL;
+ txq->ib_seg_offset = -1;
+ txq->tx = tx;
+ }
+
+ /* IBs */
+ i = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+
+ if (intr_info->num == 1)
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[0].vector);
+ else
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ if (txq->ib == NULL)
+ goto err_return;
+
+ txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
+ if (txq->ib_seg_offset == -1)
+ goto err_return;
+
+ i++;
+ }
+
+ /*
+ * Initialize
+ */
+
+ /* Tx */
+
+ tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
+ tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
+ tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
+ tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
+
+ list_add_tail(&tx->qe, &tx_mod->tx_active_q);
+ tx->bna = bna;
+ tx->priv = priv;
+ tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
+ tx->txq_stop_wc.wc_cbarg = tx;
+ tx->txq_stop_wc.wc_count = 0;
+
+ tx->type = tx_cfg->tx_type;
+
+ tx->flags = 0;
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
+ switch (tx->type) {
+ case BNA_TX_T_REGULAR:
+ if (!(tx->bna->tx_mod.flags &
+ BNA_TX_MOD_F_PORT_LOOPBACK))
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ case BNA_TX_T_LOOPBACK:
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ }
+ }
+ if (tx->bna->tx_mod.cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+
+ /* TxQ */
+
+ i = 0;
+ page_idx = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = tx_mod->priority;
+ txq->tcb = (struct bna_tcb *)
+ res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
+ txq->tx_packets = 0;
+ txq->tx_bytes = 0;
+
+ /* IB */
+
+ ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ ib_config.interpkt_timeo = 0; /* Not used */
+ ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
+ ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
+ BFI_IB_CF_INT_ENABLE |
+ BFI_IB_CF_COALESCING_MODE);
+ bna_ib_config(txq->ib, &ib_config);
+
+ /* TCB */
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
+ txq->tcb->hw_consumer_index = (volatile u32 *)
+ ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
+ (txq->ib_seg_offset * BFI_IBIDX_SIZE));
+ *(txq->tcb->hw_consumer_index) = 0;
+ txq->tcb->q_depth = tx_cfg->txq_depth;
+ txq->tcb->unmap_q = (void *)
+ res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
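+		/* Compute this TxQ's doorbell offset from a zero-based
+		 * template pointer.
+		 */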
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)&qset[txq->txq_id].txq[0];
+ txq->tcb->q_dbell = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+ txq->tcb->i_dbell = &txq->ib->door_bell;
+ txq->tcb->intr_type = intr_info->intr_type;
+ txq->tcb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ txq->tcb->txq = txq;
+ txq->tcb->bnad = bnad;
+ txq->tcb->id = i;
+
+ /* QPT, SWQPT, Pages */
+ bna_txq_qpt_setup(txq, page_count, page_size,
+ &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_PAGE].
+ res_u.mem_info.mdl[page_idx]);
+ txq->tcb->page_idx = page_idx;
+ txq->tcb->page_count = page_count;
+ page_idx += page_count;
+
+ /* Callback to bnad for setting up TCB */
+ if (tx->tcb_setup_cbfn)
+ (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
+
+ i++;
+ }
+
+ /* TxF */
+
+ tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
+ tx->txf.vlan = 0;
+
+ /* Mbox element */
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+
+ return tx;
+
+err_return:
+ bna_tx_free(tx);
+ return NULL;
+}
+
+void
+bna_tx_destroy(struct bna_tx *tx)
+{
+ /* Callback to bnad for destroying TCB */
+ if (tx->tcb_destroy_cbfn) {
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ }
+
+ bna_tx_free(tx);
+}
+
+void
+bna_tx_enable(struct bna_tx *tx)
+{
+ if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ return;
+
+ tx->flags |= BNA_TX_F_ENABLED;
+
+ if (tx->flags & BNA_TX_F_PORT_STARTED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+void
+bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
+ return;
+ }
+
+ tx->stop_cbfn = cbfn;
+ tx->stop_cbarg = tx->bna->bnad;
+
+ tx->flags &= ~BNA_TX_F_ENABLED;
+
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+int
+bna_tx_state_get(struct bna_tx *tx)
+{
+ return bfa_sm_to_state(tx_sm_table, tx->fsm);
+}
+
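+/*
+ * Initialize the Tx module: carve the Tx and TxQ arrays out of the
+ * memory in res_info and place every element on the corresponding
+ * free list.
+ */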
+void
+bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ tx_mod->bna = bna;
+ tx_mod->flags = 0;
+
+ tx_mod->tx = (struct bna_tx *)
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
+ tx_mod->txq = (struct bna_txq *)
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&tx_mod->tx_free_q);
+ INIT_LIST_HEAD(&tx_mod->tx_active_q);
+
+ INIT_LIST_HEAD(&tx_mod->txq_free_q);
+
+ for (i = 0; i < BFI_MAX_TXQ; i++) {
+ tx_mod->tx[i].txf.txf_id = i;
+ bfa_q_qe_init(&tx_mod->tx[i].qe);
+ list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
+
+ tx_mod->txq[i].txq_id = i;
+ bfa_q_qe_init(&tx_mod->txq[i].qe);
+ list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
+ }
+
+ tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
+ tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
+ tx_mod->tx_stop_wc.wc_count = 0;
+}
+
+void
+bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->tx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->txq_free_q)
+ i++;
+
+ tx_mod->bna = NULL;
+}
+
+void
+bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
+ if (type == BNA_TX_T_LOOPBACK)
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_start(tx);
+ }
+}
+
+void
+bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
+
+ /**
+ * Before calling bna_tx_stop(), increment tx_stop_wc as many times
+ * as we are going to call bna_tx_stop
+ */
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bfa_wc_up(&tx_mod->tx_stop_wc);
+ }
+
+ if (tx_mod->tx_stop_wc.wc_count == 0) {
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_stop(tx);
+ }
+}
+
+void
+bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_fail(tx);
+ }
+}
+
+void
+bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ if (prio != tx_mod->priority) {
+ tx_mod->priority = prio;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_prio_changed(tx, prio);
+ }
+ }
+}
+
+void
+bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->cee_link = cee_link;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_cee_link_status(tx, cee_link);
+ }
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_TYPES_H__
+#define __BNA_TYPES_H__
+
+#include "cna.h"
+#include "bna_hw.h"
+#include "bfa_cee.h"
+
+/**
+ *
+ * Forward declarations
+ *
+ */
+
+struct bna_txq;
+struct bna_tx;
+struct bna_rxq;
+struct bna_cq;
+struct bna_rx;
+struct bna_rxf;
+struct bna_port;
+struct bna;
+struct bnad;
+
+/**
+ *
+ * Enums, primitive data types
+ *
+ */
+
+enum bna_status {
+ BNA_STATUS_T_DISABLED = 0,
+ BNA_STATUS_T_ENABLED = 1
+};
+
+enum bna_cleanup_type {
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
+};
+
+enum bna_cb_status {
+ BNA_CB_SUCCESS = 0,
+ BNA_CB_FAIL = 1,
+ BNA_CB_INTERRUPT = 2,
+ BNA_CB_BUSY = 3,
+ BNA_CB_INVALID_MAC = 4,
+ BNA_CB_MCAST_LIST_FULL = 5,
+ BNA_CB_UCAST_CAM_FULL = 6,
+ BNA_CB_WAITING = 7,
+ BNA_CB_NOT_EXEC = 8
+};
+
+enum bna_res_type {
+ BNA_RES_T_MEM = 1,
+ BNA_RES_T_INTR = 2
+};
+
+enum bna_mem_type {
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
+};
+
+enum bna_intr_type {
+ BNA_INTR_T_INTX = 1,
+ BNA_INTR_T_MSIX = 2
+};
+
+enum bna_res_req_type {
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_MEM_T_SWSTATS = 4,
+ BNA_RES_MEM_T_IBIDX = 5,
+ BNA_RES_MEM_T_IB_ARRAY = 6,
+ BNA_RES_MEM_T_INTR_ARRAY = 7,
+ BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
+ BNA_RES_MEM_T_TX_ARRAY = 9,
+ BNA_RES_MEM_T_TXQ_ARRAY = 10,
+ BNA_RES_MEM_T_RX_ARRAY = 11,
+ BNA_RES_MEM_T_RXP_ARRAY = 12,
+ BNA_RES_MEM_T_RXQ_ARRAY = 13,
+ BNA_RES_MEM_T_UCMAC_ARRAY = 14,
+ BNA_RES_MEM_T_MCMAC_ARRAY = 15,
+ BNA_RES_MEM_T_RIT_ENTRY = 16,
+ BNA_RES_MEM_T_RIT_SEGMENT = 17,
+ BNA_RES_INTR_T_MBOX = 18,
+ BNA_RES_T_MAX
+};
+
+enum bna_tx_res_req_type {
+ BNA_TX_RES_MEM_T_TCB = 0,
+ BNA_TX_RES_MEM_T_UNMAPQ = 1,
+ BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_SWQPT = 3,
+ BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_INTR_T_TXCMPL = 5,
+ BNA_TX_RES_T_MAX,
+};
+
+enum bna_rx_mem_type {
+ BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
+	BNA_RX_RES_MEM_T_RCB = 1,	/* RxQ context */
+ BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
+ BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
+ BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
+	BNA_RX_RES_MEM_T_HQPT = 6,	/* header RxQ QPT */
+	BNA_RX_RES_MEM_T_DQPT = 7,	/* data RxQ QPT */
+	BNA_RX_RES_MEM_T_HSWQPT = 8,	/* header RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DSWQPT = 9,	/* data RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DPAGE = 10,	/* RX data pages */
+	BNA_RX_RES_MEM_T_HPAGE = 11,	/* RX header pages */
+ BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
+ BNA_RX_RES_T_MAX = 13
+};
+
+enum bna_mbox_state {
+ BNA_MBOX_FREE = 0,
+ BNA_MBOX_POSTED = 1
+};
+
+enum bna_tx_type {
+ BNA_TX_T_REGULAR = 0,
+ BNA_TX_T_LOOPBACK = 1,
+};
+
+enum bna_tx_flags {
+ BNA_TX_F_PORT_STARTED = 1,
+ BNA_TX_F_ENABLED = 2,
+ BNA_TX_F_PRIO_LOCK = 4,
+};
+
+enum bna_tx_mod_flags {
+ BNA_TX_MOD_F_PORT_STARTED = 1,
+ BNA_TX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rx_type {
+ BNA_RX_T_REGULAR = 0,
+ BNA_RX_T_LOOPBACK = 1,
+};
+
+enum bna_rxp_type {
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
+};
+
+enum bna_rxmode {
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_DEFAULT = 2,
+ BNA_RXMODE_ALLMULTI = 4
+};
+
+enum bna_rx_event {
+ RX_E_START = 1,
+ RX_E_STOP = 2,
+ RX_E_FAIL = 3,
+ RX_E_RXF_STARTED = 4,
+ RX_E_RXF_STOPPED = 5,
+ RX_E_RXQ_STOPPED = 6,
+};
+
+enum bna_rx_state {
+ BNA_RX_STOPPED = 1,
+ BNA_RX_RXF_START_WAIT = 2,
+ BNA_RX_STARTED = 3,
+ BNA_RX_RXF_STOP_WAIT = 4,
+ BNA_RX_RXQ_STOP_WAIT = 5,
+};
+
+enum bna_rx_flags {
+ BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
+ BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
+ BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
+};
+
+enum bna_rx_mod_flags {
+ BNA_RX_MOD_F_PORT_STARTED = 1,
+ BNA_RX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rxf_oper_state {
+ BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
+ BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
+};
+
+enum bna_rxf_flags {
+ BNA_RXF_FL_STOP_PENDING = 0x01,
+ BNA_RXF_FL_FAILED = 0x02,
+ BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
+ BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
+ BNA_RXF_FL_RXF_ENABLED = 0x10,
+ BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
+};
+
+enum bna_rxf_event {
+ RXF_E_START = 1,
+ RXF_E_STOP = 2,
+ RXF_E_FAIL = 3,
+ RXF_E_CAM_FLTR_MOD = 4,
+ RXF_E_STARTED = 5,
+ RXF_E_STOPPED = 6,
+ RXF_E_CAM_FLTR_RESP = 7,
+ RXF_E_PAUSE = 8,
+ RXF_E_RESUME = 9,
+ RXF_E_STAT_CLEARED = 10,
+};
+
+enum bna_rxf_state {
+ BNA_RXF_STOPPED = 1,
+ BNA_RXF_START_WAIT = 2,
+ BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
+ BNA_RXF_STARTED = 4,
+ BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
+ BNA_RXF_STOP_WAIT = 6,
+ BNA_RXF_PAUSE_WAIT = 7,
+ BNA_RXF_RESUME_WAIT = 8,
+ BNA_RXF_STAT_CLR_WAIT = 9,
+};
+
+enum bna_port_type {
+ BNA_PORT_T_REGULAR = 0,
+ BNA_PORT_T_LOOPBACK_INTERNAL = 1,
+ BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
+};
+
+enum bna_link_status {
+ BNA_LINK_DOWN = 0,
+ BNA_LINK_UP = 1,
+ BNA_CEE_UP = 2
+};
+
+enum bna_llport_flags {
+ BNA_LLPORT_F_ENABLED = 1,
+ BNA_LLPORT_F_RX_ENABLED = 2
+};
+
+enum bna_port_flags {
+ BNA_PORT_F_DEVICE_READY = 1,
+ BNA_PORT_F_ENABLED = 2,
+ BNA_PORT_F_PAUSE_CHANGED = 4,
+ BNA_PORT_F_MTU_CHANGED = 8
+};
+
+enum bna_pkt_rates {
+ BNA_PKT_RATE_10K = 10000,
+ BNA_PKT_RATE_20K = 20000,
+ BNA_PKT_RATE_30K = 30000,
+ BNA_PKT_RATE_40K = 40000,
+ BNA_PKT_RATE_50K = 50000,
+ BNA_PKT_RATE_60K = 60000,
+ BNA_PKT_RATE_70K = 70000,
+ BNA_PKT_RATE_80K = 80000,
+};
+
+enum bna_dim_load_types {
+ BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
+ BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
+ BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
+ BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
+ BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
+ BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
+ BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
+ BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
+ BNA_LOAD_T_MAX = 8
+};
+
+enum bna_dim_bias_types {
+ BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
+ BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
+ BNA_BIAS_T_MAX = 2
+};
+
+struct bna_mac {
+ /* This should be the first one */
+ struct list_head qe;
+ u8 addr[ETH_ALEN];
+};
+
+struct bna_mem_descr {
+ u32 len;
+ void *kva;
+ struct bna_dma_addr dma;
+};
+
+struct bna_mem_info {
+ enum bna_mem_type mem_type;
+ u32 len;
+ u32 num;
+ u32 align_sz; /* 0/1 = no alignment */
+ struct bna_mem_descr *mdl;
+ void *cookie; /* For bnad to unmap dma later */
+};
+
+struct bna_intr_descr {
+ int vector;
+};
+
+struct bna_intr_info {
+ enum bna_intr_type intr_type;
+ int num;
+ struct bna_intr_descr *idl;
+};
+
+union bna_res_u {
+ struct bna_mem_info mem_info;
+ struct bna_intr_info intr_info;
+};
+
+struct bna_res_info {
+ enum bna_res_type res_type;
+ union bna_res_u res_u;
+};
+
+/* HW QPT */
+struct bna_qpt {
+ struct bna_dma_addr hw_qpt_ptr;
+ void *kv_qpt_ptr;
+ u32 page_count;
+ u32 page_size;
+};
+
+/**
+ *
+ * Device
+ *
+ */
+
+struct bna_device {
+ bfa_fsm_t fsm;
+ struct bfa_ioc ioc;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *ready_cbarg;
+
+ void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *stop_cbarg;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Mail box
+ *
+ */
+
+struct bna_mbox_qe {
+ /* This should be the first one */
+ struct list_head qe;
+
+ struct bfa_mbox_cmd cmd;
+ u32 cmd_len;
+ /* Callback for port, tx, rx, rxf */
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+};
+
+struct bna_mbox_mod {
+ enum bna_mbox_state state;
+ struct list_head posted_q;
+ u32 msg_pending;
+ u32 msg_ctr;
+ struct bna *bna;
+};
+
+/**
+ *
+ * Port
+ *
+ */
+
+/* Pause configuration */
+struct bna_pause_config {
+ enum bna_status tx_pause;
+ enum bna_status rx_pause;
+};
+
+struct bna_llport {
+ bfa_fsm_t fsm;
+ enum bna_llport_flags flags;
+
+ enum bna_port_type type;
+
+ enum bna_link_status link_status;
+
+ int admin_up_count;
+
+ void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+struct bna_port {
+ bfa_fsm_t fsm;
+ enum bna_port_flags flags;
+
+ enum bna_port_type type;
+
+ struct bna_llport llport;
+
+ struct bna_pause_config pause_config;
+ u8 priority;
+ int mtu;
+
+ /* Callback for bna_port_disable(), port_stop() */
+ void (*stop_cbfn)(void *, enum bna_cb_status);
+ void *stop_cbarg;
+
+ /* Callback for bna_port_pause_config() */
+ void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
+
+ /* Callback for bna_port_mtu_set() */
+ void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
+
+ void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+ struct bfa_wc chld_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Interrupt Block
+ *
+ */
+
+/* IB index segment structure */
+struct bna_ibidx_seg {
+ /* This should be the first one */
+ struct list_head qe;
+
+ u8 ib_seg_size;
+ u8 ib_idx_tbl_offset;
+};
+
+/* Interrupt structure */
+struct bna_intr {
+ /* This should be the first one */
+ struct list_head qe;
+ int ref_count;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ struct bna_ib *ib;
+};
+
+/* Doorbell structure */
+struct bna_ib_dbell {
+	void __iomem *doorbell_addr;
+ u32 doorbell_ack;
+};
+
+/* Interrupt timer configuration */
+struct bna_ib_config {
+ u8 coalescing_timeo; /* Unit is 5usec. */
+
+ int interpkt_count;
+ int interpkt_timeo;
+
+ enum ib_flags ctrl_flags;
+};
+
+/* IB structure */
+struct bna_ib {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int ib_id;
+
+ int ref_count;
+ int start_count;
+
+ struct bna_dma_addr ib_seg_host_addr;
+ void *ib_seg_host_addr_kva;
+ u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
+
+ struct bna_ibidx_seg *idx_seg;
+
+ struct bna_ib_dbell door_bell;
+
+ struct bna_intr *intr;
+
+ struct bna_ib_config ib_config;
+
+ struct bna *bna;
+};
+
+/* IB module - keeps track of IBs and interrupts */
+struct bna_ib_mod {
+ struct bna_ib *ib; /* BFI_MAX_IB entries */
+ struct bna_intr *intr; /* BFI_MAX_IB entries */
+ struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
+
+ struct list_head ib_free_q;
+
+ struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
+
+ struct list_head intr_free_q;
+ struct list_head intr_active_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Tx object
+ *
+ */
+
+/* Tx datapath control structure */
+#define BNA_Q_NAME_SIZE 16
+struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ volatile u32 *hw_consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ struct bna_ib_dbell *i_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_txq *txq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 priority; /* Current priority */
+ unsigned long flags; /* Used by bnad as required */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* TxQ QPT and configuration */
+struct bna_txq {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int txq_id;
+
+ u8 priority;
+
+ struct bna_qpt qpt;
+ struct bna_tcb *tcb;
+ struct bna_ib *ib;
+ int ib_seg_offset;
+
+ struct bna_tx *tx;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* TxF structure (hardware Tx Function) */
+struct bna_txf {
+ int txf_id;
+ enum txf_flags ctrl_flags;
+ u16 vlan;
+};
+
+/* Tx object */
+struct bna_tx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+ enum bna_tx_flags flags;
+
+ enum bna_tx_type type;
+
+ struct list_head txq_q;
+ struct bna_txf txf;
+
+ /* Tx event handlers */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+
+ /* callback for bna_tx_disable(), bna_tx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ /* callback for bna_tx_prio_set() */
+ void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
+ enum bna_cb_status status);
+
+ struct bfa_wc txq_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
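+/* Tx configuration - supplied by bnad when creating a Tx object */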
+struct bna_tx_config {
+ int num_txq;
+ int txq_depth;
+ enum bna_tx_type tx_type;
+};
+
+struct bna_tx_event_cbfn {
+ /* Optional */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ /* Mandatory */
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+};
+
+/* Tx module - keeps track of free, active tx objects */
+struct bna_tx_mod {
+ struct bna_tx *tx; /* BFI_MAX_TXQ entries */
+ struct bna_txq *txq; /* BFI_MAX_TXQ entries */
+
+ struct list_head tx_free_q;
+ struct list_head tx_active_q;
+
+ struct list_head txq_free_q;
+
+ /* callback for bna_tx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc tx_stop_wc;
+
+ enum bna_tx_mod_flags flags;
+
+ int priority;
+ int cee_link;
+
+ u32 txf_bmap[2];
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Receive Indirection Table
+ *
+ */
+
+/* One row of RIT table */
+struct bna_rit_entry {
+ u8 large_rxq_id; /* used for either large or data buffers */
+ u8 small_rxq_id; /* used for either small or header buffers */
+};
+
+/* RIT segment */
+struct bna_rit_segment {
+ struct list_head qe;
+
+ u32 rit_offset;
+ u32 rit_size;
+ /**
+ * max_rit_size: Varies per RIT segment depending on how RIT is
+ * partitioned
+ */
+ u32 max_rit_size;
+
+ struct bna_rit_entry *rit;
+};
+
+struct bna_rit_mod {
+ struct bna_rit_entry *rit;
+ struct bna_rit_segment *rit_segment;
+
+ struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
+};
+
+/**
+ *
+ * Rx object
+ *
+ */
+
+/* Rx datapath control structure */
+struct bna_rcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_rxq *rxq;
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ unsigned long flags;
+ int id;
+};
+
+/* RxQ structure - QPT, configuration */
+struct bna_rxq {
+ struct list_head qe;
+ int rxq_id;
+
+ int buffer_size;
+ int q_depth;
+
+ struct bna_qpt qpt;
+ struct bna_rcb *rcb;
+
+ struct bna_rxp *rxp;
+ struct bna_rx *rx;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
+};
+
+/* RxQ pair */
+union bna_rxq_u {
+ struct {
+ struct bna_rxq *hdr;
+ struct bna_rxq *data;
+ } hds;
+ struct {
+ struct bna_rxq *small;
+ struct bna_rxq *large;
+ } slr;
+ struct {
+ struct bna_rxq *only;
+ struct bna_rxq *reserved;
+ } single;
+};
+
+/* Packet rate for Dynamic Interrupt Moderation */
+struct bna_pkt_rate {
+ u32 small_pkt_cnt;
+ u32 large_pkt_cnt;
+};
+
+/* Completion control structure */
+struct bna_ccb {
+ /* Fast path */
+ void **sw_qpt;
+ u32 producer_index;
+ volatile u32 *hw_producer_index;
+ u32 q_depth;
+ struct bna_ib_dbell *i_dbell;
+ struct bna_rcb *rcb[2];
+ void *ctrl; /* For bnad */
+ struct bna_pkt_rate pkt_rate;
+ int page_idx;
+ int page_count;
+
+ /* Control path */
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 rx_coalescing_timeo; /* For NAPI */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* CQ QPT, configuration */
+struct bna_cq {
+ int cq_id;
+
+ struct bna_qpt qpt;
+ struct bna_ccb *ccb;
+
+ struct bna_ib *ib;
+ u8 ib_seg_offset;
+
+ struct bna_rx *rx;
+};
+
+struct bna_rss_config {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+struct bna_hds_config {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* This structure is used during RX creation */
+struct bna_rx_config {
+ enum bna_rx_type rx_type;
+ int num_paths;
+ enum bna_rxp_type rxp_type;
+ int paused;
+ int q_depth;
+ /*
+ * Small/Large (or Header/Data) buffer size to be configured
+ * for SLR and HDS queue type. Large buffer size comes from
+ * port->mtu.
+ */
+ int small_buff_size;
+
+ enum bna_status rss_status;
+ struct bna_rss_config rss_config;
+
+ enum bna_status hds_status;
+ struct bna_hds_config hds_config;
+
+ enum bna_status vlan_strip_status;
+};
+
+/* Rx Path structure - one per MSIX vector/CPU */
+struct bna_rxp {
+ /* This should be the first one */
+ struct list_head qe;
+
+ enum bna_rxp_type type;
+ union bna_rxq_u rxq;
+ struct bna_cq cq;
+
+ struct bna_rx *rx;
+
+ /* MSI-x vector number for configuring RSS */
+ int vector;
+
+ struct bna_mbox_qe mbox_qe;
+};
+
+/* HDS configuration structure */
+struct bna_rxf_hds {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* RSS configuration structure */
+struct bna_rxf_rss {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+/* RxF structure (hardware Rx Function) */
+struct bna_rxf {
+ bfa_fsm_t fsm;
+ int rxf_id;
+ enum rxf_flags ctrl_flags;
+ u16 default_vlan_tag;
+ enum bna_rxf_oper_state rxf_oper_state;
+ enum bna_status hds_status;
+ struct bna_rxf_hds hds_cfg;
+ enum bna_status rss_status;
+ struct bna_rxf_rss rss_cfg;
+ struct bna_rit_segment *rit_segment;
+ struct bna_rx *rx;
+ u32 forced_offset;
+ struct bna_mbox_qe mbox_qe;
+ int mcast_rxq_id;
+
+ /* callback for bna_rxf_start() */
+ void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *start_cbarg;
+
+ /* callback for bna_rxf_stop() */
+ void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *stop_cbarg;
+
+ /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
+ void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *oper_state_cbarg;
+
+ /**
+ * callback for:
+ * bna_rxf_ucast_set()
+ * bna_rxf_{ucast/mcast}_add(),
+ * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_mode_set()
+ */
+ void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *cam_fltr_cbarg;
+
+ enum bna_rxf_flags rxf_flags;
+
+ /* List of unicast addresses yet to be applied to h/w */
+ struct list_head ucast_pending_add_q;
+ struct list_head ucast_pending_del_q;
+ int ucast_pending_set;
+ /* ucast addresses applied to the h/w */
+ struct list_head ucast_active_q;
+ struct bna_mac *ucast_active_mac;
+
+ /* List of multicast addresses yet to be applied to h/w */
+ struct list_head mcast_pending_add_q;
+ struct list_head mcast_pending_del_q;
+ /* multicast addresses applied to the h/w */
+ struct list_head mcast_active_q;
+
+ /* Rx modes yet to be applied to h/w */
+ enum bna_rxmode rxmode_pending;
+ enum bna_rxmode rxmode_pending_bitmask;
+ /* Rx modes applied to h/w */
+ enum bna_rxmode rxmode_active;
+
+ enum bna_status vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+};
+
+/* Rx object */
+struct bna_rx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+
+ enum bna_rx_type type;
+
+ /* list-head for RX path objects */
+ struct list_head rxp_q;
+
+ struct bna_rxf rxf;
+
+ enum bna_rx_flags rx_flags;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bfa_wc rxq_stop_wc;
+
+ /* Rx event handlers */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+
+ /* callback for bna_rx_disable(), bna_rx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_rx_event_cbfn {
+ /* Optional */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ /* Mandatory */
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+};
+
+/* Rx module - keeps track of free, active rx objects */
+struct bna_rx_mod {
+ struct bna *bna; /* back pointer to parent */
+ struct bna_rx *rx; /* BFI_MAX_RXQ entries */
+ struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
+ struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
+
+ struct list_head rx_free_q;
+ struct list_head rx_active_q;
+ int rx_free_count;
+
+ struct list_head rxp_free_q;
+ int rxp_free_count;
+
+ struct list_head rxq_free_q;
+ int rxq_free_count;
+
+ enum bna_rx_mod_flags flags;
+
+ /* callback for bna_rx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc rx_stop_wc;
+ u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
+ u32 rxf_bmap[2];
+};
+
+/**
+ *
+ * CAM
+ *
+ */
+
+struct bna_ucam_mod {
+ struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+struct bna_mcam_mod {
+ struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Statistics
+ *
+ */
+
+struct bna_tx_stats {
+ int tx_state;
+ int tx_flags;
+ int num_txqs;
+ u32 txq_bmap[2];
+ int txf_id;
+};
+
+struct bna_rx_stats {
+ int rx_state;
+ int rx_flags;
+ int num_rxps;
+ int num_rxqs;
+ u32 rxq_bmap[2];
+ u32 cq_bmap[2];
+ int rxf_id;
+ int rxf_state;
+ int rxf_oper_state;
+ int num_active_ucast;
+ int num_active_mcast;
+ int rxmode_active;
+ int vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+ int rss_status;
+ int hds_status;
+};
+
+struct bna_sw_stats {
+ int device_state;
+ int port_state;
+ int port_flags;
+ int llport_state;
+ int priority;
+ int num_active_tx;
+ int num_active_rx;
+ struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
+ struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
+};
+
+struct bna_stats {
+ u32 txf_bmap[2];
+ u32 rxf_bmap[2];
+ struct bfi_ll_stats *hw_stats;
+ struct bna_sw_stats *sw_stats;
+};
+
+/**
+ *
+ * BNA
+ *
+ */
+
+struct bna {
+ struct bfa_pcidev pcidev;
+
+ int port_num;
+
+ struct bna_chip_regs regs;
+
+ struct bna_dma_addr hw_stats_dma;
+ struct bna_stats stats;
+
+ struct bna_device device;
+ struct bfa_cee cee;
+
+ struct bna_mbox_mod mbox_mod;
+
+ struct bna_port port;
+
+ struct bna_tx_mod tx_mod;
+
+ struct bna_rx_mod rx_mod;
+
+ struct bna_ib_mod ib_mod;
+
+ struct bna_ucam_mod ucam_mod;
+ struct bna_mcam_mod mcam_mod;
+
+ struct bna_rit_mod rit_mod;
+
+ int rxf_default_id;
+ int rxf_promisc_id;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bnad *bnad;
+};
+
+#endif /* __BNA_TYPES_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#include "bnad.h"
+#include "bna.h"
+#include "cna.h"
+
+DEFINE_MUTEX(bnad_fwimg_mutex);
+
+/*
+ * Module params
+ */
+static uint bnad_msix_disable;
+module_param(bnad_msix_disable, uint, 0444);
+MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+
+/*
+ * Global variables
+ */
+u32 bnad_rxqs_per_cq = 2;
+
+const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*
+ * Local MACROS
+ */
+#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
+
+#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
+
+#define BNAD_GET_MBOX_IRQ(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
+ ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
+ ((_bnad)->pcidev->irq))
+
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
+do { \
+ (_res_info)->res_type = BNA_RES_T_MEM; \
+ (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
+ (_res_info)->res_u.mem_info.num = (_num); \
+ (_res_info)->res_u.mem_info.len = \
+ sizeof(struct bnad_unmap_q) + \
+ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+} while (0)
+
+/*
+ * Reinitialize the completions in the CQ once Rx is taken down
+ */
+static void
+bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ unsigned int wi_range, wis = 0, ccb_prod = 0;
+ int i;
+
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
+ wi_range);
+
+ for (i = 0; i < ccb->q_depth; i++) {
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
+ next_cmpl, wi_range);
+ }
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+}
+
+/*
+ * Frees all pending Tx Bufs
+ * At this point no activity is expected on the Q,
+ * so DMA unmap & freeing is fine.
+ */
+static void
+bnad_free_all_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u16 unmap_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb = NULL;
+ int i;
+
+ unmap_array = unmap_q->unmap_array;
+
+ unmap_cons = 0;
+ while (unmap_cons < unmap_q->q_depth) {
+ skb = unmap_array[unmap_cons].skb;
+ if (!skb) {
+ unmap_cons++;
+ continue;
+ }
+ unmap_array[unmap_cons].skb = NULL;
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ unmap_cons++;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ unmap_cons++;
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Data Path Handlers */
+
+/*
+ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * Can be called in a) Interrupt context
+ * b) Sending context
+ * c) Tasklet context
+ */
+static u32
+bnad_free_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u32 sent_packets = 0, sent_bytes = 0;
+ u16 wis, unmap_cons, updated_hw_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+ int i;
+
+	/*
+	 * Just return if TX is stopped. This check is useful when
+	 * bnad_free_txbufs() runs from a tasklet that was scheduled
+	 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED
+	 * bit, but actually executes after the cleanup has completed.
+	 */
+ if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return 0;
+
+ updated_hw_cons = *(tcb->hw_consumer_index);
+
+ wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
+ updated_hw_cons, tcb->q_depth);
+
+ BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ while (wis) {
+ skb = unmap_array[unmap_cons].skb;
+
+ unmap_array[unmap_cons].skb = NULL;
+
+ sent_packets++;
+ sent_bytes += skb->len;
+ wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ prefetch(&unmap_array[unmap_cons + 1]);
+
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Update consumer pointers. */
+ tcb->consumer_index = updated_hw_cons;
+ unmap_q->consumer_index = unmap_cons;
+
+ tcb->txq->tx_packets += sent_packets;
+ tcb->txq->tx_bytes += sent_bytes;
+
+ return sent_packets;
+}
+
+/*
+ * Tx free tasklet function: frees Tx buffers for all the TCBs in all
+ * the Tx objects. Scheduled from the sending context so that the
+ * heavyweight Tx lock is not held for too long there.
+ */
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+ struct bnad *bnad = (struct bnad *)bnad_ptr;
+ struct bna_tcb *tcb;
+ u32 acked;
+ int i, j;
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (!tcb)
+ continue;
+ if (((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index) &&
+ (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+ &tcb->flags))) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ }
+ }
+ }
+}
+
+static u32
+bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 sent;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return 0;
+
+ sent = bnad_free_txbufs(bnad, tcb);
+ if (sent) {
+ if (netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ bna_ib_ack(tcb->i_dbell, sent);
+ } else
+ bna_ib_ack(tcb->i_dbell, 0);
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ return sent;
+}
+
+/* MSIX Tx Completion Handler */
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+ struct bna_tcb *tcb = (struct bna_tcb *)data;
+ struct bnad *bnad = tcb->bnad;
+
+ bnad_tx(bnad, tcb);
+
+ return IRQ_HANDLED;
+}
+
+static void
+bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ rcb->producer_index = 0;
+ rcb->consumer_index = 0;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+}
+
+static void
+bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+
+ unmap_q = rcb->unmap_q;
+ while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ BUG_ON(!(skb));
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+			pci_unmap_addr(
+			&unmap_q->unmap_array[unmap_q->consumer_index],
+			dma_addr),
+			rcb->rxq->buffer_size + NET_IP_ALIGN,
+			PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+ }
+
+ bnad_reset_rcb(bnad, rcb);
+}
+
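+/*
+ * Allocate skbs for as many free unmap-queue entries as possible,
+ * DMA map them, write their addresses into the RxQ entries and ring
+ * the producer doorbell.
+ */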
+static void
+bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ u16 to_alloc, alloced, unmap_prod, wi_range;
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct bna_rxq_entry *rxent;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ alloced = 0;
+ to_alloc =
+ BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_prod = unmap_q->producer_index;
+
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+
+ while (to_alloc--) {
+ if (!wi_range) {
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
+ wi_range);
+ }
+ skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ goto finishing;
+ }
+ skb->dev = bnad->netdev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ unmap_array[unmap_prod].skb = skb;
+ dma_addr = pci_map_single(bnad->pcidev, skb->data,
+ rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ rxent++;
+ wi_range--;
+ alloced++;
+ }
+
+finishing:
+ if (likely(alloced)) {
+ unmap_q->producer_index = unmap_prod;
+ rcb->producer_index = unmap_prod;
+ smp_mb();
+ bna_rxq_prod_indx_doorbell(rcb);
+ }
+}
+
+/*
+ * Locking is required in the enable path because it is
+ * called from NAPI poll context, where bna_lock is not
+ * already held (unlike in the IRQ context).
+ */
+static void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+ struct bna_tcb *tcb;
+ struct bna_ccb *ccb;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ bna_ib_coalescing_timer_set(tcb->i_dbell,
+ tcb->txq->ib->ib_config.coalescing_timeo);
+ bna_ib_ack(tcb->i_dbell, 0);
+ }
+ }
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+ bnad_enable_rx_irq_unsafe(ccb);
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
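+/*
+ * Refills the RxQ when the free-entry count crosses the refill
+ * threshold; the BNAD_RXQ_REFILL bit serializes concurrent refills.
+ */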
+static inline void
+bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
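+/*
+ * Core Rx completion processing: consumes up to 'budget' valid CQ
+ * entries, unmaps each buffer and passes the skb up the stack
+ * (GRO / VLAN accelerated), then acks the IB and refills the RxQs.
+ */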
+static u32
+bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ struct bna_rcb *rcb = NULL;
+ unsigned int wi_range, packets = 0, wis = 0;
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+ u32 flags;
+ u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
+ struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+
+ prefetch(bnad->netdev);
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+ wi_range);
+ BUG_ON(!(wi_range <= ccb->q_depth));
+ while (cmpl->valid && packets < budget) {
+ packets++;
+ BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+ if (qid0 == cmpl->rxq_id)
+ rcb = ccb->rcb[0];
+ else
+ rcb = ccb->rcb[1];
+
+ unmap_q = rcb->unmap_q;
+
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ BUG_ON(!(skb));
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_q->
+ unmap_array[unmap_q->
+ consumer_index],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ PCI_DMA_FROMDEVICE);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+
+ /* TODO: updating both indices per completion may be inefficient */
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
+ next_cmpl, wi_range);
+ BUG_ON(!(wi_range <= ccb->q_depth));
+ }
+ prefetch(next_cmpl);
+
+ flags = ntohl(cmpl->flags);
+ if (unlikely
+ (flags &
+ (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ dev_kfree_skb_any(skb);
+ rcb->rxq->rx_packets_with_error++;
+ goto next;
+ }
+
+ skb_put(skb, ntohs(cmpl->length));
+ if (likely
+ (bnad->rx_csum &&
+ (((flags & BNA_CQ_EF_IPV4) &&
+ (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+ (flags & BNA_CQ_EF_IPV6)) &&
+ (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+ (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+ if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag), skb);
+ else
+ vlan_hwaccel_receive_skb(skb,
+ bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag));
+
+ } else { /* Not VLAN tagged/stripped */
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ctrl->napi, skb);
+ else
+ netif_receive_skb(skb);
+ }
+
+next:
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+
+ if (likely(ccb)) {
+ bna_ib_ack(ccb->i_dbell, packets);
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
+ } else
+ bna_ib_ack(ccb->i_dbell, 0);
+
+ return packets;
+}
+
+static void
+bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
+ bna_ib_ack(ccb->i_dbell, 0);
+}
+
+static void
+bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
+ bnad_enable_rx_irq_unsafe(ccb);
+ spin_unlock_irq(&bnad->bna_lock);
+}
+
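+/* Masks the CQ interrupt and schedules NAPI, unless already scheduled */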
+static void
+bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+ if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+ bnad_disable_rx_irq(bnad, ccb);
+ __napi_schedule((&rx_ctrl->napi));
+ }
+ BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
+}
+
+/* MSIX Rx Path Handler */
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+ struct bna_ccb *ccb = (struct bna_ccb *)data;
+ struct bnad *bnad = ccb->bnad;
+
+ bnad_netif_rx_schedule_poll(bnad, ccb);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handlers */
+
+/* Mbox Interrupt Handlers */
+static irqreturn_t
+bnad_msix_mbox_handler(int irq, void *data)
+{
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad;
+
+ bnad = netdev_priv(netdev);
+
+ /* BNA_ISR_GET(bnad); Inc Ref count */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ bna_mbox_handler(&bnad->bna, intr_status);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* BNAD_ISR_PUT(bnad); Dec Ref count */
+ return IRQ_HANDLED;
+}
+
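+/* Legacy INTx ISR: services both mailbox and Rx/Tx data interrupts */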
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+ int i, j;
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+ if (!intr_status) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+ bna_mbox_handler(&bnad->bna, intr_status);
+ if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Process data interrupts */
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb)
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+done:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Called in interrupt / callback context
+ * with bna_lock held, so cfg_flags access is OK
+ */
+static void
+bnad_enable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ return;
+
+ if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ enable_irq(irq);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+}
+
+/*
+ * Called with bnad->bna_lock held because of
+ * bnad->cfg_flags access.
+ */
+void
+bnad_disable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ return;
+
+ if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ disable_irq_nosync(irq);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
+
+/* Control Path Handlers */
+
+/* Callbacks */
+void
+bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+{
+ bnad_enable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+{
+ bnad_disable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+{
+	/* Set the status before waking up the waiter */
+	bnad->bnad_completions.ioc_comp_status = status;
+	complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+{
+	bnad->bnad_completions.ioc_comp_status = status;
+	complete(&bnad->bnad_completions.ioc_comp);
+}
+
+static void
+bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.port_comp);
+
+ netif_carrier_off(bnad->netdev);
+}
+
+void
+bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status link_status)
+{
+ bool link_up;
+
+ link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
+
+ if (link_status == BNA_CEE_UP) {
+ set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ BNAD_UPDATE_CTR(bnad, cee_up);
+ } else
+ clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+
+ if (link_up) {
+ if (!netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link up\n",
+ bnad->netdev->name);
+ netif_carrier_on(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+ /* Force an immediate Transmit Schedule */
+ pr_info("bna: %s TX_STARTED\n",
+ bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ } else {
+ netif_stop_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+ }
+ } else {
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link down\n",
+ bnad->netdev->name);
+ netif_carrier_off(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ }
+ }
+}
+
+static void
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.tx_comp);
+}
+
+static void
+bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ tx_info->tcb[tcb->id] = tcb;
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ tx_info->tcb[tcb->id] = NULL;
+}
+
+static void
+bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = ccb;
+ ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
+}
+
+static void
+bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = NULL;
+}
+
+static void
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ if (tx_info != &bnad->tx_info[0])
+ return;
+
+ clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+ netif_stop_queue(bnad->netdev);
+ pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+}
+
+static void
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return;
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_unmap_q *unmap_q;
+
+ /* Validate tcb before dereferencing it */
+ if (!tcb || !tcb->unmap_q)
+ return;
+
+ unmap_q = tcb->unmap_q;
+
+ if (!unmap_q->unmap_array)
+ return;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return;
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+}
+
+static void
+bnad_cb_rx_cleanup(struct bnad *bnad,
+ struct bna_ccb *ccb)
+{
+ bnad_cq_cmpl_init(bnad, ccb);
+
+ bnad_free_rxbufs(bnad, ccb->rcb[0]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1]) {
+ bnad_free_rxbufs(bnad, ccb->rcb[1]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+ }
+}
+
+static void
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+
+ /*
+ * Now allocate & post buffers for this RCB.
+ * Note: allocation happens in callback context.
+ */
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
+static void
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.rx_comp);
+}
+
+static void
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ bnad->bnad_completions.mcast_comp_status = status;
+ complete(&bnad->bnad_completions.mcast_comp);
+}
+
+void
+bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats)
+{
+ if (status == BNA_CB_SUCCESS)
+ BNAD_UPDATE_CTR(bnad, hw_stats_updates);
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+}
+
+void
+bnad_cb_stats_clr(struct bnad *bnad)
+{
+}
+
+/* Resource allocation, free functions */
+
+static void
+bnad_mem_free(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if (mem_info->mdl == NULL)
+ return;
+
+ for (i = 0; i < mem_info->num; i++) {
+ if (mem_info->mdl[i].kva != NULL) {
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
+ dma_pa);
+ pci_free_consistent(bnad->pcidev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
+ } else
+ kfree(mem_info->mdl[i].kva);
+ }
+ }
+ kfree(mem_info->mdl);
+ mem_info->mdl = NULL;
+}
+
+static int
+bnad_mem_alloc(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if ((mem_info->num == 0) || (mem_info->len == 0)) {
+ mem_info->mdl = NULL;
+ return 0;
+ }
+
+ mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
+ GFP_KERNEL);
+ if (mem_info->mdl == NULL)
+ return -ENOMEM;
+
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva =
+ pci_alloc_consistent(bnad->pcidev,
+ mem_info->len, &dma_pa);
+
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+
+ BNA_SET_DMA_ADDR(dma_pa,
+ &(mem_info->mdl[i].dma));
+ }
+ } else {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva = kzalloc(mem_info->len,
+ GFP_KERNEL);
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+ }
+ }
+
+ return 0;
+
+err_return:
+ bnad_mem_free(bnad, mem_info);
+ return -ENOMEM;
+}
+
+/* Free IRQ for Mailbox */
+static void
+bnad_mbox_irq_free(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+ int irq;
+ unsigned long flags;
+
+ if (intr_info->idl == NULL)
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bnad_disable_mbox_irq(bnad);
+
+ irq = BNAD_GET_MBOX_IRQ(bnad);
+ free_irq(irq, bnad->netdev);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ kfree(intr_info->idl);
+}
+
+/*
+ * Allocates an IRQ for the mailbox, but keeps it disabled.
+ * It will be enabled once we get the mbox-enable callback
+ * from bna.
+ */
+static int
+bnad_mbox_irq_alloc(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+ int err;
+ unsigned long flags;
+ unsigned long irq_flags; /* for request_irq(); must not clobber 'flags' */
+ u32 irq;
+ irq_handler_t irq_handler;
+
+ /* Mbox should use only 1 vector */
+
+ intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX) {
+ irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ irq_flags = 0;
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl[0].vector = bnad->msix_num - 1;
+ } else {
+ irq_handler = (irq_handler_t)bnad_isr;
+ irq = bnad->pcidev->irq;
+ irq_flags = IRQF_SHARED;
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ /* intr_info->idl[0].vector is already 0 from kzalloc */
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+
+ err = request_irq(irq, irq_handler, irq_flags,
+ bnad->mbox_irq_name, bnad->netdev);
+ if (err) {
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+ return err;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_disable_mbox_irq(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return 0;
+}
+
+static void
+bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
+{
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+}
+
+/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
+static int
+bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
+ uint txrx_id, struct bna_intr_info *intr_info)
+{
+ int i, vector_start = 0;
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ vector_start = txrx_id;
+ break;
+
+ case BNAD_INTR_RX:
+ vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+ txrx_id;
+ break;
+
+ default:
+ BUG();
+ }
+
+ for (i = 0; i < intr_info->num; i++)
+ intr_info->idl[i].vector = vector_start + i;
+ } else {
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ intr_info->num = 1;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+ break;
+
+ case BNAD_INTR_RX:
+ intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Tx MSIX vector(s) from the kernel
+ */
+static void
+bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ int num_txqs)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ if (tx_info->tcb[i] == NULL)
+ continue;
+
+ vector_num = tx_info->tcb[i]->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ uint tx_id, int num_txqs)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ vector_num = tx_info->tcb[i]->intr_vector;
+ sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_tx, 0,
+ tx_info->tcb[i]->name,
+ tx_info->tcb[i]);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ /* Vectors 0..(i - 1) were registered; unregister all of them */
+ if (i > 0)
+ bnad_tx_msix_unregister(bnad, tx_info, i);
+ return -1;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Rx MSIX vector(s) from the kernel
+ */
+static void
+bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ int num_rxps)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ if (rx_info->rx_ctrl[i].ccb == NULL)
+ continue;
+
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector,
+ rx_info->rx_ctrl[i].ccb);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ uint rx_id, int num_rxps)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+ bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_rx, 0,
+ rx_info->rx_ctrl[i].ccb->name,
+ rx_info->rx_ctrl[i].ccb);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ /* Vectors 0..(i - 1) were registered; unregister all of them */
+ if (i > 0)
+ bnad_rx_msix_unregister(bnad, rx_info, i);
+ return -1;
+}
+
+/* Free Tx object Resources */
+static void
+bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Tx object */
+static int
+bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint tx_id)
+{
+ int i, err = 0;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Free Rx object Resources */
+static void
+bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Rx object */
+static int
+bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint rx_id)
+{
+ int i, err = 0;
+
+ /* All memory needs to be allocated before setup_ccbs */
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_rx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Timer callbacks */
+/* a) IOC timer */
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_hb_check(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_sem_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * All timer routines use bnad->bna_lock to protect against
+ * the following race, which may occur in case of no locking:
+ * Time CPU m CPU n
+ * 0 1 = test_bit
+ * 1 clear_bit
+ * 2 del_timer_sync
+ * 3 mod_timer
+ */
+
+/* b) Dynamic Interrupt Moderation Timer */
+static void
+bnad_dim_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i, j;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (!rx_ctrl->ccb)
+ continue;
+ bna_rx_dim_update(rx_ctrl->ccb);
+ }
+ }
+
+ /* Check BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
+ if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/* c) Statistics Timer */
+static void
+bnad_stats_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_stats_get(&bnad->bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Set up timer for DIM
+ * Called with bnad->bna_lock held
+ */
+void
+bnad_dim_timer_start(struct bnad *bnad)
+{
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->dim_timer, bnad_dim_timeout,
+ (unsigned long)bnad);
+ set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ }
+}
+
+/*
+ * Set up timer for statistics
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_start(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->stats_timer, bnad_stats_timeout,
+ (unsigned long)bnad);
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Stops the stats timer
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_stop(struct bnad *bnad)
+{
+ int to_del = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ to_del = 1;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->stats_timer);
+}
+
+/* Utilities */
+
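+/*
+ * Copies the netdev multicast list into mc_list starting at index 1;
+ * the caller reserves index 0 for the broadcast address and must size
+ * mc_list for (netdev_mc_count(netdev) + 1) entries.
+ */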
+static void
+bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
+{
+ int i = 1; /* Index 0 has broadcast address */
+ struct netdev_hw_addr *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, netdev) {
+ memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
+ ETH_ALEN);
+ i++;
+ }
+}
+
+static int
+bnad_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
+ if (rcvd == budget)
+ return rcvd;
+
+poll_exit:
+ napi_complete((napi));
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_rx_irq(bnad, ccb);
+ return rcvd;
+}
+
+static int
+bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+ int i, j;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ /* Handle Tx Completions, if any */
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
+
+ /* Handle Rx Completions */
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
+ if (rcvd == budget)
+ return rcvd;
+poll_exit:
+ napi_complete((napi));
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_txrx_irqs(bnad);
+ return rcvd;
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ int (*napi_poll) (struct napi_struct *, int);
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ napi_poll = bnad_napi_poll_rx;
+ else
+ napi_poll = bnad_napi_poll_txrx;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+ netif_napi_add(bnad->netdev, &rx_ctrl->napi,
+ napi_poll, 64);
+ napi_enable(&rx_ctrl->napi);
+ }
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+{
+ int i;
+
+ /* First disable and then clean up */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ }
+}
+
+/* Should be called with conf_lock held */
+void
+bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+{
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ unsigned long flags;
+
+ if (!tx_info->tx)
+ return;
+
+ init_completion(&bnad->bnad_completions.tx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.tx_comp);
+
+ if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
+ bnad_tx_msix_unregister(bnad, tx_info,
+ bnad->num_txq_per_tx);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ tx_info->tx = NULL;
+
+ if (0 == tx_id)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
+ bnad_tx_res_free(bnad, res_info);
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_setup_tx(struct bnad *bnad, uint tx_id)
+{
+ int err;
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
+ struct bna_tx_event_cbfn tx_cbfn;
+ struct bna_tx *tx;
+ unsigned long flags;
+
+ /* Initialize the Tx object configuration */
+ tx_config->num_txq = bnad->num_txq_per_tx;
+ tx_config->txq_depth = bnad->txq_depth;
+ tx_config->tx_type = BNA_TX_T_REGULAR;
+
+ /* Initialize the tx event handlers */
+ tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
+ tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
+ tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
+ tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
+ tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
+
+ /* Get BNA's resource requirement for one tx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_res_req(bnad->num_txq_per_tx,
+ bnad->txq_depth, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ bnad->num_txq_per_tx,
+ BNAD_TX_UNMAPQ_DEPTH);
+
+ /* Allocate resources */
+ err = bnad_tx_res_alloc(bnad, res_info, tx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Tx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
+ tx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!tx)
+ goto err_return;
+ tx_info->tx = tx;
+
+ /* Register ISR for the Tx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_tx_msix_register(bnad, tx_info,
+ tx_id, bnad->num_txq_per_tx);
+ if (err)
+ goto err_return;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_enable(tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Setup the rx config for bna_rx_create */
+/* bnad decides the configuration */
+static void
+bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
+{
+ rx_config->rx_type = BNA_RX_T_REGULAR;
+ rx_config->num_paths = bnad->num_rxp_per_rx;
+
+ if (bnad->num_rxp_per_rx > 1) {
+ rx_config->rss_status = BNA_STATUS_T_ENABLED;
+ rx_config->rss_config.hash_type =
+ (BFI_RSS_T_V4_TCP |
+ BFI_RSS_T_V6_TCP |
+ BFI_RSS_T_V4_IP |
+ BFI_RSS_T_V6_IP);
+ rx_config->rss_config.hash_mask =
+ bnad->num_rxp_per_rx - 1;
+ get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ sizeof(rx_config->rss_config.toeplitz_hash_key));
+ } else {
+ rx_config->rss_status = BNA_STATUS_T_DISABLED;
+ memset(&rx_config->rss_config, 0,
+ sizeof(rx_config->rss_config));
+ }
+ rx_config->rxp_type = BNA_RXP_SLR;
+ rx_config->q_depth = bnad->rxq_depth;
+
+ rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+
+ rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
+bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (!rx_info->rx)
+ return;
+
+ if (0 == rx_id) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del)
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (dim_timer_del)
+ del_timer_sync(&bnad->dim_timer);
+ }
+
+ bnad_napi_disable(bnad, rx_id);
+
+ init_completion(&bnad->bnad_completions.rx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.rx_comp);
+
+ if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
+ bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_destroy(rx_info->rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ rx_info->rx = NULL;
+
+ bnad_rx_res_free(bnad, res_info);
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+int
+bnad_setup_rx(struct bnad *bnad, uint rx_id)
+{
+ int err;
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_rx_event_cbfn rx_cbfn;
+ struct bna_rx *rx;
+ unsigned long flags;
+
+ /* Initialize the Rx object configuration */
+ bnad_init_rx_config(bnad, rx_config);
+
+ /* Initialize the Rx event handlers */
+ rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+ rx_cbfn.rcb_destroy_cbfn = NULL;
+ rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
+ rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
+ rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
+ rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
+
+ /* Get BNA's resource requirement for one Rx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_res_req(rx_config, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
+ rx_config->num_paths +
+ ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
+ rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
+
+ /* Allocate resource */
+ err = bnad_rx_res_alloc(bnad, res_info, rx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Rx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
+ rx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!rx)
+ goto err_return;
+ rx_info->rx = rx;
+
+ /* Register ISR for the Rx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_rx_msix_register(bnad, rx_info, rx_id,
+ rx_config->num_paths);
+ if (err)
+ goto err_return;
+ }
+
+ /* Enable NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (0 == rx_id) {
+ /* Set up Dynamic Interrupt Moderation Vector */
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
+ bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
+
+ /* Enable VLAN filtering only on the default Rx */
+ bna_rx_vlanfilter_enable(rx);
+
+ /* Start the DIM timer */
+ bnad_dim_timer_start(bnad);
+ }
+
+ bna_rx_enable(rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_cleanup_rx(bnad, rx_id);
+ return err;
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_tx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_tx_info *tx_info;
+
+ tx_info = &bnad->tx_info[0];
+ if (!tx_info->tx)
+ return;
+
+ bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_rx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info;
+ int i;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ bna_rx_coalescing_timeo_set(rx_info->rx,
+ bnad->rx_coalescing_timeo);
+ }
+}
+
+/*
+ * Called with bnad->bna_lock held
+ */
+static int
+bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+{
+ int ret;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+ /* If datapath is down, pretend everything went through */
+ if (!bnad->rx_info[0].rx)
+ return 0;
+
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ if (ret != BNA_CB_SUCCESS)
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_enable_default_bcast(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[0];
+ int ret;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.mcast_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (ret == BNA_CB_SUCCESS)
+ wait_for_completion(&bnad->bnad_completions.mcast_comp);
+ else
+ return -ENODEV;
+
+ if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Statistics utilities */
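+/* Aggregates the per-RxQ/TxQ software counters into netdev stats */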
+void
+bnad_netdev_qstats_fill(struct bnad *bnad)
+{
+ struct net_device_stats *net_stats = &bnad->net_stats;
+ int i, j;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ net_stats->rx_packets += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
+ net_stats->rx_bytes += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ net_stats->rx_packets +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_packets;
+ net_stats->rx_bytes +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_bytes;
+ }
+ }
+ }
+ }
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ if (bnad->tx_info[i].tcb[j]) {
+ net_stats->tx_packets +=
+ bnad->tx_info[i].tcb[j]->txq->tx_packets;
+ net_stats->tx_bytes +=
+ bnad->tx_info[i].tcb[j]->txq->tx_bytes;
+ }
+ }
+ }
+}
+
+/*
+ * Must be called with the bna_lock held.
+ */
+void
+bnad_netdev_hwstats_fill(struct bnad *bnad)
+{
+ struct bfi_ll_stats_mac *mac_stats;
+ struct net_device_stats *net_stats = &bnad->net_stats;
+ u64 bmap;
+ int i;
+
+ mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
+ net_stats->rx_errors =
+ mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
+ mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
+ mac_stats->rx_undersize;
+ net_stats->tx_errors = mac_stats->tx_fcs_error +
+ mac_stats->tx_undersize;
+ net_stats->rx_dropped = mac_stats->rx_drop;
+ net_stats->tx_dropped = mac_stats->tx_drop;
+ net_stats->multicast = mac_stats->rx_multicast;
+ net_stats->collisions = mac_stats->tx_total_collision;
+
+ net_stats->rx_length_errors = mac_stats->rx_frame_length_error;
+
+ /* rx_over_errors (receive ring buffer overflow) not filled in */
+
+ net_stats->rx_crc_errors = mac_stats->rx_fcs_error;
+ net_stats->rx_frame_errors = mac_stats->rx_alignment_error;
+ /* receiver FIFO overrun */
+ bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
+ ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ net_stats->rx_fifo_errors =
+ bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i].frame_drops;
+ break;
+ }
+ bmap >>= 1;
+ }
+}
+
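+/* Waits for any in-flight mailbox interrupt handler to complete */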
+static void
+bnad_mbox_irq_sync(struct bnad *bnad)
+{
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ else
+ irq = bnad->pcidev->irq;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ synchronize_irq(irq);
+}
+
+/*
+ * Utility used by bnad_start_xmit to prepare an skb for TSO:
+ * seeds the TCP checksum with the pseudo-header sum (excluding
+ * the length field) so the hardware can finish it per segment.
+ */
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+ int err;
+
+ /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
+ BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
+ }
+ }
+
+ /*
+ * For TSO, the TCP checksum field is seeded with pseudo-header sum
+ * excluding the length field.
+ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* Seeded to zero; the hardware recomputes these per segment */
+ iph->tot_len = 0;
+ iph->check = 0;
+
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso4);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
+ ipv6h->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso6);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize Q numbers depending on Rx Paths
+ * Called with bnad->bna_lock held, because of cfg_flags
+ * access.
+ */
+static void
+bnad_q_num_init(struct bnad *bnad)
+{
+ int rxps;
+
+ rxps = min((uint)num_online_cpus(),
+ (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ rxps = 1; /* INTx */
+
+ bnad->num_rx = 1;
+ bnad->num_tx = 1;
+ bnad->num_rxp_per_rx = rxps;
+ bnad->num_txq_per_tx = BNAD_TXQ_NUM;
+}
+
+/*
+ * Adjusts the Q numbers, given the number of MSI-X vectors:
+ * RSS is preferred over Tx priority queues, so only 1 Tx Q is used.
+ * Rx paths then get (msix_vectors - num Tx Qs - mailbox vectors).
+ * Called with bnad->bna_lock held because of cfg_flags access.
+ */
+static void
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+{
+ bnad->num_txq_per_tx = 1;
+ if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
+ bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
+ (bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bnad->num_rxp_per_rx = msix_vectors -
+ (bnad->num_tx * bnad->num_txq_per_tx) -
+ BNAD_MAILBOX_MSIX_VECTORS;
+ } else
+ bnad->num_rxp_per_rx = 1;
+}
+
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+}
+
+/* Enable / disable device */
+static void
+bnad_device_disable(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+}
+
+static int
+bnad_device_enable(struct bnad *bnad)
+{
+ int err = 0;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_enable(&bnad->bna.device);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+
+ if (bnad->bnad_completions.ioc_comp_status)
+ err = bnad->bnad_completions.ioc_comp_status;
+
+ return err;
+}
+
+/* Free BNA resources */
+static void
+bnad_res_free(struct bnad *bnad)
+{
+ int i;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else
+ bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for BNA */
+static int
+bnad_res_alloc(struct bnad *bnad)
+{
+ int i, err;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
+ else
+ err = bnad_mbox_irq_alloc(bnad,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_res_free(bnad);
+ return err;
+}
+
+/* Interrupt enable / disable */
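+/*
+ * Tries MSI-X with the full vector count; on a partial grant the
+ * queue numbers are adjusted and the request retried once, after
+ * which the driver falls back to INTx.
+ */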
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+ int i, ret;
+ u32 tot_msix_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (bnad->msix_table)
+ return;
+
+ tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+
+ bnad->msix_table =
+ kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!bnad->msix_table)
+ goto intx_mode;
+
+ for (i = 0; i < tot_msix_num; i++)
+ bnad->msix_table[i].entry = i;
+
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
+ if (ret > 0) {
+ /* Not enough MSI-X vectors. */
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ /* ret = #of vectors that we got */
+ bnad_q_num_adjust(bnad, ret);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
+ + (bnad->num_rx
+ * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+ tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+
+ /* Try once more with adjusted numbers */
+ /* If this fails, fall back to INTx */
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+ tot_msix_num);
+ if (ret)
+ goto intx_mode;
+
+ } else if (ret < 0)
+ goto intx_mode;
+ return;
+
+intx_mode:
+
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ bnad->msix_num = 0;
+ bnad->msix_diag_num = 0;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ pci_disable_msix(bnad->pcidev);
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ }
+}
+
+/* Netdev entry points */
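+/* Open path: sets up Tx/Rx, configures the port and starts the stats timer */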
+static int
+bnad_open(struct net_device *netdev)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ int mtu;
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Tx */
+ err = bnad_setup_tx(bnad, 0);
+ if (err)
+ goto err_return;
+
+ /* Rx */
+ err = bnad_setup_rx(bnad, 0);
+ if (err)
+ goto cleanup_tx;
+
+ /* Port */
+ pause_config.tx_pause = 0;
+ pause_config.rx_pause = 0;
+
+ mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ bna_port_enable(&bnad->bna.port);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Enable broadcast */
+ bnad_enable_default_bcast(bnad);
+
+ /* Set the UCAST address */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Start the stats timer */
+ bnad_stats_timer_start(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+
+cleanup_tx:
+ bnad_cleanup_tx(bnad, 0);
+
+err_return:
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static int
+bnad_stop(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Stop the stats timer */
+ bnad_stats_timer_stop(bnad);
+
+ init_completion(&bnad->bnad_completions.port_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
+ bnad_cb_port_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.port_comp);
+
+ bnad_cleanup_tx(bnad, 0);
+ bnad_cleanup_rx(bnad, 0);
+
+ /* Synchronize mailbox IRQ */
+ bnad_mbox_irq_sync(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+}
+
+/* TX */
+/*
+ * bnad_start_xmit : Netdev entry point for Transmit
+ * Called with the netdev Tx lock held
+ */
+static netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
+ u32 tx_id;
+ int err;
+
+ struct bnad_tx_info *tx_info;
+ struct bna_tcb *tcb;
+ struct bnad_unmap_q *unmap_q;
+ dma_addr_t dma_addr;
+ struct bna_txq_entry *txqent;
+ bna_txq_wi_ctrl_flag_t flags;
+
+ if (unlikely
+ (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * Drops a Tx that got scheduled between clearing the
+ * BNAD_RF_TX_STARTED flag and the netif_stop_queue() call.
+ */
+ if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_id = 0;
+
+ tx_info = &bnad->tx_info[tx_id];
+ tcb = tx_info->tcb[tx_id];
+ unmap_q = tcb->unmap_q;
+
+ vectors = 1 + skb_shinfo(skb)->nr_frags;
+ if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
+ acked = 0;
+ if (unlikely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ if ((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index &&
+ !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ } else {
+ netif_stop_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+
+ smp_mb();
+ /*
+ * Check again to deal with race condition between
+ * netif_stop_queue here, and netif_wake_queue in
+ * interrupt handler which is not inside netif tx lock.
+ */
+ if (likely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ return NETDEV_TX_BUSY;
+ } else {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ unmap_prod = unmap_q->producer_index;
+ wis_used = 1;
+ vect_id = 0;
+ flags = 0;
+
+ txq_prod = tcb->producer_index;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ txqent->hdr.wi.reserved = 0;
+ txqent->hdr.wi.num_vectors = vectors;
+ txqent->hdr.wi.opcode =
+ htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+ BNA_TXQ_WI_SEND));
+
+ if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
+ vlan_tag = (u16) vlan_tx_tag_get(skb);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
+ vlan_tag =
+ (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+
+ txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+ if (skb_is_gso(skb)) {
+ err = bnad_tso_prepare(bnad, skb);
+ if (err) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+ flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (tcp_hdrlen(skb) >> 2,
+ skb_transport_offset(skb)));
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
+
+ txqent->hdr.wi.lso_mss = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) + tcp_hdrlen(skb)));
+
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr)));
+ } else {
+ err = skb_checksum_help(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help);
+ if (err) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help_err);
+ return NETDEV_TX_OK;
+ }
+ }
+ } else {
+ txqent->hdr.wi.lso_mss = 0;
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+ }
+
+ txqent->hdr.wi.flags = htons(flags);
+
+ txqent->hdr.wi.frame_length = htonl(skb->len);
+
+ unmap_q->unmap_array[unmap_prod].skb = skb;
+ BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+
+ if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
+ vect_id = 0;
+ if (--wi_range)
+ txqent++;
+ else {
+ BNA_QE_INDX_ADD(txq_prod, wis_used,
+ tcb->q_depth);
+ wis_used = 0;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
+ txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ }
+ wis_used++;
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ }
+
+ BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(size);
+ dma_addr =
+ pci_map_page(bnad->pcidev, frag->page,
+ frag->page_offset, size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ }
+
+ unmap_q->producer_index = unmap_prod;
+ BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
+ tcb->producer_index = txq_prod;
+
+ smp_mb();
+ bna_txq_prod_indx_doorbell(tcb);
+
+ if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
+ tasklet_schedule(&bnad->tx_free_tasklet);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Uses the spin lock to synchronize reads of the stats structures,
+ * which are written by BNA under the same lock.
+ */
+static struct net_device_stats *
+bnad_get_netdev_stats(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
+
+ bnad_netdev_qstats_fill(bnad);
+ bnad_netdev_hwstats_fill(bnad);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return &bnad->net_stats;
+}
+
+static void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 new_mask, valid_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ new_mask = valid_mask = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
+ new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_PROMISC) {
+ new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags &= ~BNAD_CF_PROMISC;
+ }
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
+ new_mask |= BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
+ new_mask &= ~BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
+ }
+ }
+
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+
+ if (!netdev_mc_empty(netdev)) {
+ u8 *mcaddr_list;
+ int mc_count = netdev_mc_count(netdev);
+
+ /* Index 0 holds the broadcast address */
+ mcaddr_list =
+ kzalloc((mc_count + 1) * ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mcaddr_list)
+ return;
+
+ memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* Copy rest of the MC addresses */
+ bnad_netdev_mc_list_get(netdev, mcaddr_list);
+
+ bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mcaddr_list, NULL);
+
+ /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+ kfree(mcaddr_list);
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * bna_lock is used to sync writes to netdev->addr.
+ * conf_lock cannot be used since this call may be made
+ * in a non-blocking context.
+ */
+static int
+bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
+
+ if (!err)
+ memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return err;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int mtu, err = 0;
+ unsigned long flags;
+
+ struct bnad *bnad = netdev_priv(netdev);
+
+ if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ netdev->mtu = new_mtu;
+
+ mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *vlan_grp)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->vlan_grp = vlan_grp;
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ u32 curr_mask;
+ int i, j;
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bna_intx_disable(&bnad->bna, curr_mask);
+ bnad_isr(bnad->pcidev->irq, netdev);
+ bna_intx_enable(&bnad->bna, curr_mask);
+ } else {
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb) {
+ bnad_disable_rx_irq(bnad,
+ rx_ctrl->ccb);
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+ }
+ }
+}
+#endif
+
+static const struct net_device_ops bnad_netdev_ops = {
+ .ndo_open = bnad_open,
+ .ndo_stop = bnad_stop,
+ .ndo_start_xmit = bnad_start_xmit,
+ .ndo_get_stats = bnad_get_netdev_stats,
+ .ndo_set_rx_mode = bnad_set_rx_mode,
+ .ndo_set_multicast_list = bnad_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnad_set_mac_address,
+ .ndo_change_mtu = bnad_change_mtu,
+ .ndo_vlan_rx_register = bnad_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnad_netpoll
+#endif
+};
+
+static void
+bnad_netdev_init(struct bnad *bnad, bool using_dac)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+
+ netdev->features |= NETIF_F_GRO;
+ pr_warn("bna: GRO enabled, using kernel stack GRO\n");
+
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |=
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features = netdev->features;
+ netdev->mem_start = bnad->mmio_start;
+ netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
+
+ netdev->netdev_ops = &bnad_netdev_ops;
+ bnad_set_ethtool_ops(netdev);
+}
+
+/*
+ * 1. Initialize the bnad structure
+ * 2. Setup netdev pointer in pci_dev
+ * 3. Initialize Tx free tasklet
+ * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ */
+static int
+bnad_init(struct bnad *bnad,
+ struct pci_dev *pdev, struct net_device *netdev)
+{
+ unsigned long flags;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ bnad->netdev = netdev;
+ bnad->pcidev = pdev;
+ bnad->mmio_start = pci_resource_start(pdev, 0);
+ bnad->mmio_len = pci_resource_len(pdev, 0);
+ bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ if (!bnad->bar0) {
+ dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+ pci_set_drvdata(pdev, NULL);
+ return -ENOMEM;
+ }
+ pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!bnad_msix_disable)
+ bnad->cfg_flags = BNAD_CF_MSIX;
+
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
+ (bnad->num_rx * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+ bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
+
+ bnad->txq_depth = BNAD_TXQ_DEPTH;
+ bnad->rxq_depth = BNAD_RXQ_DEPTH;
+ bnad->rx_csum = true;
+
+ bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+
+ tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+ (unsigned long)bnad);
+
+ return 0;
+}
+
+/*
+ * Must be called after bnad_pci_uninit()
+ * so that iounmap() and pci_set_drvdata(NULL)
+ * happen only after PCI uninitialization.
+ */
+static void
+bnad_uninit(struct bnad *bnad)
+{
+ if (bnad->bar0)
+ iounmap(bnad->bar0);
+ pci_set_drvdata(bnad->pcidev, NULL);
+}
+
+/*
+ * Initialize locks
 a) Per-device mutex used for serializing configuration
+ changes from OS interface
+ b) spin lock used to protect bna state machine
+ */
+static void
+bnad_lock_init(struct bnad *bnad)
+{
+ spin_lock_init(&bnad->bna_lock);
+ mutex_init(&bnad->conf_mutex);
+}
+
+static void
+bnad_lock_uninit(struct bnad *bnad)
+{
+ mutex_destroy(&bnad->conf_mutex);
+}
+
+/* PCI Initialization */
+static int
+bnad_pci_init(struct bnad *bnad,
+ struct pci_dev *pdev, bool *using_dac)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, BNAD_NAME);
+ if (err)
+ goto disable_device;
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ *using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
+ }
+ *using_dac = 0;
+ }
+ pci_set_master(pdev);
+ return 0;
+
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void
+bnad_pci_uninit(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pcidev_id)
+{
+ bool using_dac;
+ int err;
+ struct bnad *bnad;
+ struct bna *bna;
+ struct net_device *netdev;
+ struct bfa_pcidev pcidev_info;
+ unsigned long flags;
+
+ pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+ pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&bnad_fwimg_mutex);
+ if (!cna_get_firmware_buf(pdev)) {
+ mutex_unlock(&bnad_fwimg_mutex);
+ pr_warn("Failed to load Firmware Image!\n");
+ return -ENODEV;
+ }
+ mutex_unlock(&bnad_fwimg_mutex);
+
+ /*
+	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
+	 * bnad = netdev_priv(netdev)
+ */
+ netdev = alloc_etherdev(sizeof(struct bnad));
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ err = -ENOMEM;
+ return err;
+ }
+ bnad = netdev_priv(netdev);
+
+ /*
+ * PCI initialization
+ * Output : using_dac = 1 for 64 bit DMA
+ * = 0 for 32 bit DMA
+ */
+ err = bnad_pci_init(bnad, pdev, &using_dac);
+ if (err)
+ goto free_netdev;
+
+ bnad_lock_init(bnad);
+ /*
+ * Initialize bnad structure
+ * Setup relation between pci_dev & netdev
+ * Init Tx free tasklet
+ */
+ err = bnad_init(bnad, pdev, netdev);
+ if (err)
+ goto pci_uninit;
+ /* Initialize netdev structure, set up ethtool ops */
+ bnad_netdev_init(bnad, using_dac);
+
+ bnad_enable_msix(bnad);
+
+	/* Get resource requirement from bna */
+ bna_res_req(&bnad->res_info[0]);
+
+ /* Allocate resources from bna */
+ err = bnad_res_alloc(bnad);
+ if (err)
+ goto free_netdev;
+
+ bna = &bnad->bna;
+
+ /* Setup pcidev_info for bna_init() */
+ pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+ pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+ pcidev_info.device_id = bnad->pcidev->device;
+ pcidev_info.pci_bar_kva = bnad->bar0;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->stats.bna_stats = &bna->stats;
+
+ /* Set up timers */
+ setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+ ((unsigned long)bnad));
+
+ /* Now start the timer before calling IOC */
+ mod_timer(&bnad->bna.device.ioc.ioc_timer,
+ jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+
+ /*
+ * Start the chip
+ * Don't care even if err != 0, bna state machine will
+ * deal with it
+ */
+ err = bnad_device_enable(bnad);
+
+ /* Get the burnt-in mac */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mac_get(&bna->port, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ /*
+ * Make sure the link appears down to the stack
+ */
+ netif_carrier_off(netdev);
+
+	/* Finally, register with the net_device layer */
+ err = register_netdev(netdev);
+ if (err) {
+ pr_err("BNA : Registering with netdev failed\n");
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+pci_uninit:
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+free_netdev:
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnad *bnad;
+ struct bna *bna;
+ unsigned long flags;
+
+ if (!netdev)
+ return;
+
+ pr_info("%s bnad_pci_remove\n", netdev->name);
+ bnad = netdev_priv(netdev);
+ bna = &bnad->bna;
+
+ unregister_netdev(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+ free_netdev(netdev);
+}
+
+const struct pci_device_id bnad_pci_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ PCI_DEVICE_ID_BROCADE_CT),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ }, {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static struct pci_driver bnad_pci_driver = {
+ .name = BNAD_NAME,
+ .id_table = bnad_pci_id_table,
+ .probe = bnad_pci_probe,
+ .remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+ int err;
+
+ pr_info("Brocade 10G Ethernet driver\n");
+
+ bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+ err = pci_register_driver(&bnad_pci_driver);
+ if (err < 0) {
+ pr_err("bna : PCI registration failed in module init "
+ "(%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+ pci_unregister_driver(&bnad_pci_driver);
+
+ if (bfi_fw)
+ release_firmware(bfi_fw);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+MODULE_FIRMWARE(CNA_FW_FILE_CT);
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNAD_H__
+#define __BNAD_H__
+
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/ipv6.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+/* Fix for IA64 */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include "bna.h"
+
+#define BNAD_TXQ_DEPTH 2048
+#define BNAD_RXQ_DEPTH 2048
+
+#define BNAD_MAX_TXS 1
+#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
+#define BNAD_TXQ_NUM 1
+
+#define BNAD_MAX_RXS 1
+#define BNAD_MAX_RXPS_PER_RX 16
+
+/*
+ * Control structure, pointed to by ccb->ctrl, which
+ * determines the NAPI / LRO behavior of a CCB.
+ * There is a 1:1 correspondence between ccb & ctrl.
+ */
+struct bnad_rx_ctrl {
+ struct bna_ccb *ccb;
+ struct napi_struct napi;
+};
+
+#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
+
+#define BNAD_GET_TX_ID(_skb) (0)
+
+/*
+ * GLOBAL #defines (CONSTANTS)
+ */
+#define BNAD_NAME "bna"
+#define BNAD_NAME_LEN 64
+
+#define BNAD_VERSION "2.3.2.0"
+
+#define BNAD_MAILBOX_MSIX_VECTORS 1
+
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+
+#define BNAD_MAX_Q_DEPTH 0x10000
+#define BNAD_MIN_Q_DEPTH 0x200
+
+#define BNAD_JUMBO_MTU 9000
+
+#define BNAD_NETIF_WAKE_THRESHOLD 8
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
+
+/* Bit positions for tcb->flags */
+#define BNAD_TXQ_FREE_SENT 0
+
+/* Bit positions for rcb->flags */
+#define BNAD_RXQ_REFILL 0
+#define BNAD_RXQ_STARTED 1
+
+/*
+ * DATA STRUCTURES
+ */
+
+/* enums */
+enum bnad_intr_source {
+ BNAD_INTR_TX = 1,
+ BNAD_INTR_RX = 2
+};
+
+enum bnad_link_state {
+ BNAD_LS_DOWN = 0,
+ BNAD_LS_UP = 1
+};
+
+struct bnad_completion {
+ struct completion ioc_comp;
+ struct completion ucast_comp;
+ struct completion mcast_comp;
+ struct completion tx_comp;
+ struct completion rx_comp;
+ struct completion stats_comp;
+ struct completion port_comp;
+
+ u8 ioc_comp_status;
+ u8 ucast_comp_status;
+ u8 mcast_comp_status;
+ u8 tx_comp_status;
+ u8 rx_comp_status;
+ u8 stats_comp_status;
+ u8 port_comp_status;
+};
+
+/* Tx Rx Control Stats */
+struct bnad_drv_stats {
+ u64 netif_queue_stop;
+ u64 netif_queue_wakeup;
+ u64 tso4;
+ u64 tso6;
+ u64 tso_err;
+ u64 tcpcsum_offload;
+ u64 udpcsum_offload;
+ u64 csum_help;
+ u64 csum_help_err;
+
+ u64 hw_stats_updates;
+ u64 netif_rx_schedule;
+ u64 netif_rx_complete;
+ u64 netif_rx_dropped;
+
+ u64 link_toggle;
+ u64 cee_up;
+
+ u64 rxp_info_alloc_failed;
+ u64 mbox_intr_disabled;
+ u64 mbox_intr_enabled;
+ u64 tx_unmap_q_alloc_failed;
+ u64 rx_unmap_q_alloc_failed;
+
+ u64 rxbuf_alloc_failed;
+};
+
+/* Complete driver stats */
+struct bnad_stats {
+ struct bnad_drv_stats drv_stats;
+ struct bna_stats *bna_stats;
+};
+
+/* Tx / Rx Resources */
+struct bnad_tx_res_info {
+ struct bna_res_info res_info[BNA_TX_RES_T_MAX];
+};
+
+struct bnad_rx_res_info {
+ struct bna_res_info res_info[BNA_RX_RES_T_MAX];
+};
+
+struct bnad_tx_info {
+ struct bna_tx *tx; /* 1:1 between tx_info & tx */
+ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+} ____cacheline_aligned;
+
+struct bnad_rx_info {
+ struct bna_rx *rx; /* 1:1 between rx_info & rx */
+
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+} ____cacheline_aligned;
+
+/* Unmap queues for Tx / Rx cleanup */
+struct bnad_skb_unmap {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ /* This should be the last one */
+ struct bnad_skb_unmap unmap_array[1];
+};
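+
+/*
+ * unmap_array[] is a variable-length tail: the queue is allocated in
+ * one chunk sized for q_depth entries. A minimal allocation sketch
+ * (illustrative only; the driver's own queue setup code performs the
+ * real allocation):
+ *
+ *	struct bnad_unmap_q *q;
+ *	size_t size = sizeof(struct bnad_unmap_q) +
+ *		      (q_depth - 1) * sizeof(struct bnad_skb_unmap);
+ *
+ *	q = kzalloc(size, GFP_KERNEL);
+ *	if (q)
+ *		q->q_depth = q_depth;
+ */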
+
+/* Bit mask values for bnad->cfg_flags */
+#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
+#define BNAD_CF_PROMISC 0x02
+#define BNAD_CF_ALLMULTI 0x04
+#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+
+/* Defines for run_flags bit-mask */
+/* Set, tested & cleared using xxx_bit() functions */
+/* Values indicate bit positions */
+#define BNAD_RF_CEE_RUNNING 1
+#define BNAD_RF_HW_ERROR 2
+#define BNAD_RF_MBOX_IRQ_DISABLED 3
+#define BNAD_RF_TX_STARTED 4
+#define BNAD_RF_RX_STARTED 5
+#define BNAD_RF_DIM_TIMER_RUNNING 6
+#define BNAD_RF_STATS_TIMER_RUNNING 7
+
+struct bnad {
+ struct net_device *netdev;
+
+ /* Data path */
+ struct bnad_tx_info tx_info[BNAD_MAX_TXS];
+ struct bnad_rx_info rx_info[BNAD_MAX_RXS];
+
+ struct vlan_group *vlan_grp;
+ /*
+ * These q numbers are global only because
+ * they are used to calculate MSIx vectors.
+	 * Actually the exact # of queues is per Tx/Rx
+ * object.
+ */
+ u32 num_tx;
+ u32 num_rx;
+ u32 num_txq_per_tx;
+ u32 num_rxp_per_rx;
+
+ u32 txq_depth;
+ u32 rxq_depth;
+
+ u8 tx_coalescing_timeo;
+ u8 rx_coalescing_timeo;
+
+ struct bna_rx_config rx_config[BNAD_MAX_RXS];
+ struct bna_tx_config tx_config[BNAD_MAX_TXS];
+
+ u32 rx_csum;
+
+ void __iomem *bar0; /* BAR0 address */
+
+ struct bna bna;
+
+ u32 cfg_flags;
+ unsigned long run_flags;
+
+ struct pci_dev *pcidev;
+ u64 mmio_start;
+ u64 mmio_len;
+
+ u32 msix_num;
+ u32 msix_diag_num;
+ struct msix_entry *msix_table;
+
+ struct mutex conf_mutex;
+ spinlock_t bna_lock ____cacheline_aligned;
+
+ /* Timers */
+ struct timer_list ioc_timer;
+ struct timer_list dim_timer;
+ struct timer_list stats_timer;
+
+ /* Control path resources, memory & irq */
+ struct bna_res_info res_info[BNA_RES_T_MAX];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
+
+ struct bnad_completion bnad_completions;
+
+ /* Burnt in MAC address */
+ mac_t perm_addr;
+
+ struct tasklet_struct tx_free_tasklet;
+
+ /* Statistics */
+ struct bnad_stats stats;
+ struct net_device_stats net_stats;
+
+ struct bnad_diag *diag;
+
+ char adapter_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
+ char mbox_irq_name[BNAD_NAME_LEN];
+};
+
+/*
+ * EXTERN VARIABLES
+ */
+extern struct firmware *bfi_fw;
+extern u32 bnad_rxqs_per_cq;
+
+/*
+ * EXTERN PROTOTYPES
+ */
+extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+/* Netdev entry point prototypes */
+extern void bnad_set_ethtool_ops(struct net_device *netdev);
+
+/* Configuration & setup */
+extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+
+extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+
+/* Timer start/stop protos */
+extern void bnad_dim_timer_start(struct bnad *bnad);
+
+/* Statistics */
+extern void bnad_netdev_qstats_fill(struct bnad *bnad);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad);
+
+/**
+ * MACROS
+ */
+/* To set & get the stats counters */
+#define BNAD_UPDATE_CTR(_bnad, _ctr) \
+ (((_bnad)->stats.drv_stats._ctr)++)
+
+#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
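+
+/*
+ * Illustrative use of the counter macros (a sketch only; callers are
+ * expected to hold the appropriate lock when the counter is shared):
+ *
+ *	BNAD_UPDATE_CTR(bnad, tso4);
+ *	pr_debug("tso4 = %llu\n",
+ *		 (unsigned long long)BNAD_GET_CTR(bnad, tso4));
+ */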
+
+#define bnad_enable_rx_irq_unsafe(_ccb) \
+{ \
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+}
+
+#define bnad_dim_timer_running(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
+ (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
+
+#endif /* __BNAD_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "cna.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bna.h"
+
+#include "bnad.h"
+
+#define BNAD_NUM_TXF_COUNTERS 12
+#define BNAD_NUM_RXF_COUNTERS 10
+#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_TXQ_COUNTERS 5
+
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct net_device_stats) / sizeof(unsigned long) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
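+
+/*
+ * BNAD_ETHTOOL_STATS_NUM counts, in order: the generic
+ * net_device_stats counters, the bnad driver counters, and the
+ * hardware counters up to, but excluding, the per-function
+ * RxF/TxF stats.
+ */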
+
+static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "multicast",
+ "collisions",
+
+ "rx_length_errors",
+ "rx_over_errors",
+ "rx_crc_errors",
+ "rx_frame_errors",
+ "rx_fifo_errors",
+ "rx_missed_errors",
+
+ "tx_aborted_errors",
+ "tx_carrier_errors",
+ "tx_fifo_errors",
+ "tx_heartbeat_errors",
+ "tx_window_errors",
+
+ "rx_compressed",
+ "tx_compressed",
+
+ "netif_queue_stop",
+ "netif_queue_wakeup",
+ "tso4",
+ "tso6",
+ "tso_err",
+ "tcpcsum_offload",
+ "udpcsum_offload",
+ "csum_help",
+ "csum_help_err",
+ "hw_stats_updates",
+ "netif_rx_schedule",
+ "netif_rx_complete",
+ "netif_rx_dropped",
+
+ "link_toggle",
+ "cee_up",
+
+ "rxp_info_alloc_failed",
+ "mbox_intr_disabled",
+ "mbox_intr_enabled",
+ "tx_unmap_q_alloc_failed",
+ "rx_unmap_q_alloc_failed",
+ "rxbuf_alloc_failed",
+
+ "mac_frame_64",
+ "mac_frame_65_127",
+ "mac_frame_128_255",
+ "mac_frame_256_511",
+ "mac_frame_512_1023",
+ "mac_frame_1024_1518",
+ "mac_frame_1518_1522",
+ "mac_rx_bytes",
+ "mac_rx_packets",
+ "mac_rx_fcs_error",
+ "mac_rx_multicast",
+ "mac_rx_broadcast",
+ "mac_rx_control_frames",
+ "mac_rx_pause",
+ "mac_rx_unknown_opcode",
+ "mac_rx_alignment_error",
+ "mac_rx_frame_length_error",
+ "mac_rx_code_error",
+ "mac_rx_carrier_sense_error",
+ "mac_rx_undersize",
+ "mac_rx_oversize",
+ "mac_rx_fragments",
+ "mac_rx_jabber",
+ "mac_rx_drop",
+
+ "mac_tx_bytes",
+ "mac_tx_packets",
+ "mac_tx_multicast",
+ "mac_tx_broadcast",
+ "mac_tx_pause",
+ "mac_tx_deferral",
+ "mac_tx_excessive_deferral",
+ "mac_tx_single_collision",
+ "mac_tx_muliple_collision",
+ "mac_tx_late_collision",
+ "mac_tx_excessive_collision",
+ "mac_tx_total_collision",
+ "mac_tx_pause_honored",
+ "mac_tx_drop",
+ "mac_tx_jabber",
+ "mac_tx_fcs_error",
+ "mac_tx_control_frame",
+ "mac_tx_oversize",
+ "mac_tx_undersize",
+ "mac_tx_fragments",
+
+ "bpc_tx_pause_0",
+ "bpc_tx_pause_1",
+ "bpc_tx_pause_2",
+ "bpc_tx_pause_3",
+ "bpc_tx_pause_4",
+ "bpc_tx_pause_5",
+ "bpc_tx_pause_6",
+ "bpc_tx_pause_7",
+ "bpc_tx_zero_pause_0",
+ "bpc_tx_zero_pause_1",
+ "bpc_tx_zero_pause_2",
+ "bpc_tx_zero_pause_3",
+ "bpc_tx_zero_pause_4",
+ "bpc_tx_zero_pause_5",
+ "bpc_tx_zero_pause_6",
+ "bpc_tx_zero_pause_7",
+ "bpc_tx_first_pause_0",
+ "bpc_tx_first_pause_1",
+ "bpc_tx_first_pause_2",
+ "bpc_tx_first_pause_3",
+ "bpc_tx_first_pause_4",
+ "bpc_tx_first_pause_5",
+ "bpc_tx_first_pause_6",
+ "bpc_tx_first_pause_7",
+
+ "bpc_rx_pause_0",
+ "bpc_rx_pause_1",
+ "bpc_rx_pause_2",
+ "bpc_rx_pause_3",
+ "bpc_rx_pause_4",
+ "bpc_rx_pause_5",
+ "bpc_rx_pause_6",
+ "bpc_rx_pause_7",
+ "bpc_rx_zero_pause_0",
+ "bpc_rx_zero_pause_1",
+ "bpc_rx_zero_pause_2",
+ "bpc_rx_zero_pause_3",
+ "bpc_rx_zero_pause_4",
+ "bpc_rx_zero_pause_5",
+ "bpc_rx_zero_pause_6",
+ "bpc_rx_zero_pause_7",
+ "bpc_rx_first_pause_0",
+ "bpc_rx_first_pause_1",
+ "bpc_rx_first_pause_2",
+ "bpc_rx_first_pause_3",
+ "bpc_rx_first_pause_4",
+ "bpc_rx_first_pause_5",
+ "bpc_rx_first_pause_6",
+ "bpc_rx_first_pause_7",
+
+ "rad_rx_frames",
+ "rad_rx_octets",
+ "rad_rx_vlan_frames",
+ "rad_rx_ucast",
+ "rad_rx_ucast_octets",
+ "rad_rx_ucast_vlan",
+ "rad_rx_mcast",
+ "rad_rx_mcast_octets",
+ "rad_rx_mcast_vlan",
+ "rad_rx_bcast",
+ "rad_rx_bcast_octets",
+ "rad_rx_bcast_vlan",
+ "rad_rx_drops",
+
+ "fc_rx_ucast_octets",
+ "fc_rx_ucast",
+ "fc_rx_ucast_vlan",
+ "fc_rx_mcast_octets",
+ "fc_rx_mcast",
+ "fc_rx_mcast_vlan",
+ "fc_rx_bcast_octets",
+ "fc_rx_bcast",
+ "fc_rx_bcast_vlan",
+
+ "fc_tx_ucast_octets",
+ "fc_tx_ucast",
+ "fc_tx_ucast_vlan",
+ "fc_tx_mcast_octets",
+ "fc_tx_mcast",
+ "fc_tx_mcast_vlan",
+ "fc_tx_bcast_octets",
+ "fc_tx_bcast",
+ "fc_tx_bcast_vlan",
+ "fc_tx_parity_errors",
+ "fc_tx_timeout",
+ "fc_tx_fid_parity_errors",
+};
+
+static int
+bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int
+bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ /* 10G full duplex setting supported only */
+	if (cmd->autoneg == AUTONEG_ENABLE)
+		return -EOPNOTSUPP;
+
+	if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL))
+		return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static void
+bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bfa_ioc_attr *ioc_attr;
+ unsigned long flags;
+
+ strcpy(drvinfo->driver, BNAD_NAME);
+ strcpy(drvinfo->version, BNAD_VERSION);
+
+ ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ if (ioc_attr) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version) - 1);
+ kfree(ioc_attr);
+ }
+
+ strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+}
+
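+/*
+ * get_regs() runs in two passes: with a NULL buffer it only counts
+ * registers (used by bnad_get_regs_len()); with a buffer it reads
+ * each register through BNAD_GET_REG() and fills the buffer.
+ */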
+static int
+get_regs(struct bnad *bnad, u32 *regs)
+{
+ int num = 0, i;
+ u32 reg_addr;
+ unsigned long flags;
+
+#define BNAD_GET_REG(addr) \
+do { \
+ if (regs) \
+ regs[num++] = readl(bnad->bar0 + (addr)); \
+ else \
+ num++; \
+} while (0)
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ /* DMA Block Internal Registers */
+ BNAD_GET_REG(DMA_CTRL_REG0);
+ BNAD_GET_REG(DMA_CTRL_REG1);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS);
+ BNAD_GET_REG(DMA_ERR_INT_ENABLE);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
+
+ /* APP Block Register Address Offset from BAR0 */
+ BNAD_GET_REG(HOSTFN0_INT_STATUS);
+ BNAD_GET_REG(HOSTFN0_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN0);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
+ BNAD_GET_REG(FN0_PCIE_ERR_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN1_INT_STATUS);
+ BNAD_GET_REG(HOSTFN1_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN1);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
+ BNAD_GET_REG(FN1_PCIE_ERR_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(PCIE_MISC_REG);
+
+ BNAD_GET_REG(HOST_SEM0_REG);
+ BNAD_GET_REG(HOST_SEM1_REG);
+ BNAD_GET_REG(HOST_SEM2_REG);
+ BNAD_GET_REG(HOST_SEM3_REG);
+ BNAD_GET_REG(HOST_SEM0_INFO_REG);
+ BNAD_GET_REG(HOST_SEM1_INFO_REG);
+ BNAD_GET_REG(HOST_SEM2_INFO_REG);
+ BNAD_GET_REG(HOST_SEM3_INFO_REG);
+
+ BNAD_GET_REG(TEMPSENSE_CNTL_REG);
+ BNAD_GET_REG(TEMPSENSE_STAT_REG);
+
+ BNAD_GET_REG(APP_LOCAL_ERR_STAT);
+ BNAD_GET_REG(APP_LOCAL_ERR_MSK);
+
+ BNAD_GET_REG(PCIE_LNK_ERR_STAT);
+ BNAD_GET_REG(PCIE_LNK_ERR_MSK);
+
+ BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
+ BNAD_GET_REG(RESV_ETH_TYPE);
+
+ BNAD_GET_REG(HOSTFN2_INT_STATUS);
+ BNAD_GET_REG(HOSTFN2_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN2);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
+ BNAD_GET_REG(FN2_PCIE_ERR_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN3_INT_STATUS);
+ BNAD_GET_REG(HOSTFN3_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN3);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
+ BNAD_GET_REG(FN3_PCIE_ERR_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
+
+ /* Host Command Status Registers */
+ reg_addr = HOST_CMDSTS0_CLR_REG;
+ for (i = 0; i < 16; i++) {
+ BNAD_GET_REG(reg_addr);
+ BNAD_GET_REG(reg_addr + 4);
+ BNAD_GET_REG(reg_addr + 8);
+ reg_addr += 0x10;
+ }
+
+ /* Function ID register */
+ BNAD_GET_REG(FNC_ID_REG);
+
+ /* Function personality register */
+ BNAD_GET_REG(FNC_PERS_REG);
+
+ /* Operation mode register */
+ BNAD_GET_REG(OP_MODE);
+
+ /* LPU0 Registers */
+ BNAD_GET_REG(LPU0_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU0_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU0_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU0_ERR_SET_REG);
+
+ /* LPU1 Registers */
+ BNAD_GET_REG(LPU1_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU1_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU1_ERR_SET_REG);
+
+ /* PSS Registers */
+ BNAD_GET_REG(PSS_CTL_REG);
+ BNAD_GET_REG(PSS_ERR_STATUS_REG);
+ BNAD_GET_REG(ERR_STATUS_SET);
+ BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
+
+ /* Catapult CPQ Registers */
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
+
+ /* Host Function Force Parity Error Registers */
+ BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
+
+ /* LL Port[0|1] Halt Mask Registers */
+ BNAD_GET_REG(LL_HALT_MSK_P0);
+ BNAD_GET_REG(LL_HALT_MSK_P1);
+
+ /* LL Port[0|1] Error Mask Registers */
+ BNAD_GET_REG(LL_ERR_MSK_P0);
+ BNAD_GET_REG(LL_ERR_MSK_P1);
+
+ /* EMC FLI Registers */
+ BNAD_GET_REG(FLI_CMD_REG);
+ BNAD_GET_REG(FLI_ADDR_REG);
+ BNAD_GET_REG(FLI_CTL_REG);
+ BNAD_GET_REG(FLI_WRDATA_REG);
+ BNAD_GET_REG(FLI_RDDATA_REG);
+ BNAD_GET_REG(FLI_DEV_STATUS_REG);
+ BNAD_GET_REG(FLI_SIG_WD_REG);
+
+ BNAD_GET_REG(FLI_DEV_VENDOR_REG);
+ BNAD_GET_REG(FLI_ERR_STATUS_REG);
+
+ /* RxAdm 0 Registers */
+ BNAD_GET_REG(RAD0_CTL_REG);
+ BNAD_GET_REG(RAD0_PE_PARM_REG);
+ BNAD_GET_REG(RAD0_BCN_REG);
+ BNAD_GET_REG(RAD0_DEFAULT_REG);
+ BNAD_GET_REG(RAD0_PROMISC_REG);
+ BNAD_GET_REG(RAD0_BCNQ_REG);
+ BNAD_GET_REG(RAD0_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD0_ERR_STS);
+ BNAD_GET_REG(RAD0_SET_ERR_STS);
+ BNAD_GET_REG(RAD0_ERR_INT_EN);
+ BNAD_GET_REG(RAD0_FIRST_ERR);
+ BNAD_GET_REG(RAD0_FORCE_ERR);
+
+ BNAD_GET_REG(RAD0_MAC_MAN_1H);
+ BNAD_GET_REG(RAD0_MAC_MAN_1L);
+ BNAD_GET_REG(RAD0_MAC_MAN_2H);
+ BNAD_GET_REG(RAD0_MAC_MAN_2L);
+ BNAD_GET_REG(RAD0_MAC_MAN_3H);
+ BNAD_GET_REG(RAD0_MAC_MAN_3L);
+ BNAD_GET_REG(RAD0_MAC_MAN_4H);
+ BNAD_GET_REG(RAD0_MAC_MAN_4L);
+
+ BNAD_GET_REG(RAD0_LAST4_IP);
+
+ /* RxAdm 1 Registers */
+ BNAD_GET_REG(RAD1_CTL_REG);
+ BNAD_GET_REG(RAD1_PE_PARM_REG);
+ BNAD_GET_REG(RAD1_BCN_REG);
+ BNAD_GET_REG(RAD1_DEFAULT_REG);
+ BNAD_GET_REG(RAD1_PROMISC_REG);
+ BNAD_GET_REG(RAD1_BCNQ_REG);
+ BNAD_GET_REG(RAD1_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD1_ERR_STS);
+ BNAD_GET_REG(RAD1_SET_ERR_STS);
+ BNAD_GET_REG(RAD1_ERR_INT_EN);
+
+ /* TxA0 Registers */
+ BNAD_GET_REG(TXA0_CTRL_REG);
+ /* TxA0 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
+ }
+
+ /* TxA1 Registers */
+ BNAD_GET_REG(TXA1_CTRL_REG);
+ /* TxA1 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
+ }
+
+ /* RxA Registers */
+ BNAD_GET_REG(RXA0_CTL_REG);
+ BNAD_GET_REG(RXA1_CTL_REG);
+
+ /* PLB0 Registers */
+ BNAD_GET_REG(PLB0_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB0_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB0_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB0_RL_MIN_REG);
+ BNAD_GET_REG(PLB0_RL_MAX_REG);
+ BNAD_GET_REG(PLB0_EMS_ADD_REG);
+
+ /* PLB1 Registers */
+ BNAD_GET_REG(PLB1_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB1_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB1_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB1_RL_MIN_REG);
+ BNAD_GET_REG(PLB1_RL_MAX_REG);
+ BNAD_GET_REG(PLB1_EMS_ADD_REG);
+
+ /* HQM Control Register */
+ BNAD_GET_REG(HQM0_CTL_REG);
+ BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_CTL_REG);
+ BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
+
+ /* LUT Registers */
+ BNAD_GET_REG(LUT0_ERR_STS);
+ BNAD_GET_REG(LUT0_SET_ERR_STS);
+ BNAD_GET_REG(LUT1_ERR_STS);
+ BNAD_GET_REG(LUT1_SET_ERR_STS);
+
+ /* TRC Registers */
+ BNAD_GET_REG(TRC_CTL_REG);
+ BNAD_GET_REG(TRC_MODS_REG);
+ BNAD_GET_REG(TRC_TRGC_REG);
+ BNAD_GET_REG(TRC_CNT1_REG);
+ BNAD_GET_REG(TRC_CNT2_REG);
+ BNAD_GET_REG(TRC_NXTS_REG);
+ BNAD_GET_REG(TRC_DIRR_REG);
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_TRGM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_NXTM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_STRM_REG(i));
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+#undef BNAD_GET_REG
+ return num;
+}
+
+static int
+bnad_get_regs_len(struct net_device *netdev)
+{
+ int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
+ return ret;
+}
+
+static void
+bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ memset(buf, 0, bnad_get_regs_len(netdev));
+ get_regs(netdev_priv(netdev), buf);
+}
+
+static void
+bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
+{
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static int
+bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+	/* bna_lock is required to access bnad->cfg_flags */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ coalesce->use_adaptive_rx_coalesce =
+ (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
+
+ return 0;
+}
+
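+/*
+ * Coalescing timeouts are stored in units of BFI_COALESCING_TIMER_UNIT
+ * microseconds, so the usecs <-> timeo conversion below is (sketch,
+ * assuming for illustration a 5 usec unit):
+ *
+ *	timeo = coalesce->rx_coalesce_usecs / BFI_COALESCING_TIMER_UNIT;
+ *	usecs = timeo * BFI_COALESCING_TIMER_UNIT;
+ */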
+static int
+bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (coalesce->rx_coalesce_usecs == 0 ||
+ coalesce->rx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ if (coalesce->tx_coalesce_usecs == 0 ||
+ coalesce->tx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+	 * There is no need to store rx_coalesce_usecs here; every time
+	 * DIM is disabled, we can get it from the stack.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (coalesce->use_adaptive_rx_coalesce) {
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+ bnad_dim_timer_start(bnad);
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
+ bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del) {
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ }
+ bnad_rx_coalescing_timeo_set(bnad);
+ }
+ }
+ if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+ bnad_tx_coalescing_timeo_set(bnad);
+ }
+
+ if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
+ bnad_rx_coalescing_timeo_set(bnad);
+
+ }
+
+ /* Add Tx Inter-pkt DMA count? */
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+ ringparam->rx_mini_max_pending = 0;
+ ringparam->rx_jumbo_max_pending = 0;
+ ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+
+ ringparam->rx_pending = bnad->rxq_depth;
+	ringparam->rx_mini_pending = 0;
+	ringparam->rx_jumbo_pending = 0;
+ ringparam->tx_pending = bnad->txq_depth;
+}
+
+static int
+bnad_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ int i, current_err, err = 0;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (ringparam->rx_pending == bnad->rxq_depth &&
+ ringparam->tx_pending == bnad->txq_depth) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
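+	/*
+	 * New depths must be powers of 2 within
+	 * [BNAD_MIN_Q_DEPTH, BNAD_MAX_Q_DEPTH]; the Rx upper bound is
+	 * scaled down by the number of RxQs per CQ.
+	 */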
+ if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+ !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+ if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+
+ if (ringparam->rx_pending != bnad->rxq_depth) {
+ bnad->rxq_depth = ringparam->rx_pending;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ bnad_cleanup_rx(bnad, i);
+ current_err = bnad_setup_rx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+ if (ringparam->tx_pending != bnad->txq_depth) {
+ bnad->txq_depth = ringparam->tx_pending;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ bnad_cleanup_tx(bnad, i);
+ current_err = bnad_setup_tx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ pauseparam->autoneg = 0;
+ pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
+ pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+}
+
+static int
+bnad_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ unsigned long flags;
+
+ if (pauseparam->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
+ pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+ pause_config.rx_pause = pauseparam->rx_pause;
+ pause_config.tx_pause = pauseparam->tx_pause;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static u32
+bnad_get_rx_csum(struct net_device *netdev)
+{
+ u32 rx_csum;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ rx_csum = bnad->rx_csum;
+ return rx_csum;
+}
+
+static int
+bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->rx_csum = rx_csum;
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tx_csum) {
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ } else {
+ netdev->features &= ~NETIF_F_IP_CSUM;
+ netdev->features &= ~NETIF_F_IPV6_CSUM;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tso(struct net_device *netdev, u32 tso)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tso) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, q_num;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
+ ETH_GSTRING_LEN));
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "txf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_errors", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_mac_sa", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "cq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_hw_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string,
+ "rxq%d_packets_with_error", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ sprintf(string, "txq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, count, rxf_active_num = 0, txf_active_num = 0;
+ u64 bmap;
+
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1)
+ txf_active_num++;
+ bmap >>= 1;
+ }
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1)
+ rxf_active_num++;
+ bmap >>= 1;
+ }
+ count = BNAD_ETHTOOL_STATS_NUM +
+ txf_active_num * BNAD_NUM_TXF_COUNTERS +
+ rxf_active_num * BNAD_NUM_RXF_COUNTERS;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ count += BNAD_NUM_RXQ_COUNTERS;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
+ }
+ return count;
+}
+
+static int
+bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
+{
+ int i, j;
+ struct bna_rcb *rcb = NULL;
+ struct bna_tcb *tcb = NULL;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
+ buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
+ ccb->producer_index;
+ buf[bi++] = 0; /* ccb->consumer_index */
+ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
+ ccb->hw_producer_index);
+ }
+ }
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[0]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[0];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ }
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ if (bnad->tx_info[i].tcb[j] &&
+ bnad->tx_info[i].tcb[j]->txq) {
+ tcb = bnad->tx_info[i].tcb[j];
+ buf[bi++] = tcb->txq->tx_packets;
+ buf[bi++] = tcb->txq->tx_bytes;
+ buf[bi++] = tcb->producer_index;
+ buf[bi++] = tcb->consumer_index;
+ buf[bi++] = *(tcb->hw_consumer_index);
+ }
+ }
+
+ return bi;
+}
+
+static void
+bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, bi;
+ unsigned long *net_stats, flags;
+ u64 *stats64;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
+ mutex_unlock(&bnad->conf_mutex);
+ return;
+ }
+
+ /*
+	 * bna_lock is used to sync reads from bna_stats, which is
+	 * written under the same lock.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+ memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
+
+ bnad_netdev_qstats_fill(bnad);
+ bnad_netdev_hwstats_fill(bnad);
+
+ /* Fill net_stats into ethtool buffers */
+ net_stats = (unsigned long *)&bnad->net_stats;
+ for (i = 0; i < sizeof(struct net_device_stats) / sizeof(unsigned long);
+ i++)
+ buf[bi++] = net_stats[i];
+
+ /* Fill driver stats into ethtool buffers */
+ stats64 = (u64 *)&bnad->stats.drv_stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
+ stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill txf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->txf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill rxf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill per Q stats into ethtool buffers */
+ bi = bnad_per_q_stats_fill(bnad, buf, bi);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return bnad_get_stats_count_locked(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_regs_len = bnad_get_regs_len,
+ .get_regs = bnad_get_regs,
+ .get_wol = bnad_get_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_rx_csum = bnad_get_rx_csum,
+ .set_rx_csum = bnad_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = bnad_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = bnad_set_tso,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+	.get_sset_count = bnad_get_sset_count,
+};
+
+void
+bnad_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+}
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __CNA_H__
+#define __CNA_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <linux/list.h>
+
+#define bfa_sm_fault(__mod, __event) do {				\
+	pr_err("SM Assertion failure: %s: %d: event = %d\n", __FILE__,	\
+		__LINE__, __event);					\
+} while (0)
+
+extern char bfa_version[];
+
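+/* Firmware image file, loaded via request_firmware() */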
+#define CNA_FW_FILE_CT "ctfw_cna.bin"
+#define FC_SYMNAME_MAX	256	/* max name server symbolic name size */
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+
+#pragma pack()
+
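+/*
+ * Simple queue primitives layered over struct list_head: a queue
+ * element is addressed through its embedded list_head, so next/prev
+ * are reached by casting the element pointer.
+ */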
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) do {						\
+	bfa_q_next(_qe) = (struct list_head *) NULL;			\
+	bfa_q_prev(_qe) = (struct list_head *) NULL;			\
+} while (0)
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) do {						\
+	if (!list_empty(_q)) {						\
+		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
+		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
+						(struct list_head *) (_q); \
+		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
+		bfa_q_qe_init(*((struct list_head **) _qe));		\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+	}								\
+} while (0)
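+
+/*
+ * Usage sketch (hypothetical free list): _qe receives the dequeued
+ * element, or NULL when the queue is empty:
+ *
+ *	struct list_head *qe;
+ *
+ *	bfa_q_deq(&mod->free_q, &qe);
+ *	if (qe)
+ *		... element is now off the list ...
+ */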
+
+#endif /* __CNA_H__ */
--- /dev/null
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "cna.h"
+
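+/*
+ * The firmware image is read once and cached here; subsequent
+ * adapters reuse the same buffer.
+ */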
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna;
+static u32 bfi_image_ct_cna_size;
+
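+/*
+ * Read the firmware image into memory via request_firmware() and
+ * return it as an array of 32-bit words (size is in words, not bytes).
+ */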
+u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ pr_alert("Can't locate firmware %s\n", fw_name);
+ goto error;
+ }
+
+ *bfi_image = (u32 *)fw->data;
+ *bfi_image_size = fw->size/sizeof(u32);
+ bfi_fw = fw;
+
+ return *bfi_image;
+error:
+ return NULL;
+}
+
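+/* Return the cached firmware image, reading it on first use */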
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+ if (bfi_image_ct_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct_cna,
+ &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+ return bfi_image_ct_cna;
+}
+
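+/* @off is an offset in 32-bit words from the start of the image */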
+u32 *
+bfa_cb_image_get_chunk(int type, u32 off)
+{
+	return bfi_image_ct_cna + off;
+}
+
+u32
+bfa_cb_image_get_size(int type)
+{
+ return bfi_image_ct_cna_size;
+}
#define PCI_VENDOR_ID_ARIMA 0x161f
#define PCI_VENDOR_ID_BROCADE 0x1657
+#define PCI_DEVICE_ID_BROCADE_CT 0x0014
+#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017
+#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_DEVICE_ID_BCM1250_PCI 0x0001