#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-#define QLCNIC_MAX_TX_RINGS 8
-#define QLCNIC_MAX_SDS_RINGS 8
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
/*
* Following are the states of the Phantom. Phantom will set them and
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
-#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
unsigned long state;
u32 flags;
- int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
u16 max_jumbo_rxd;
u8 max_rds_rings;
- u8 max_sds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
u8 rx_csum;
u8 portnum;
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
-int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int err, tx_q;
-
- tx_q = adapter->max_drv_tx_rings;
+ int err;
- netdev->num_tx_queues = tx_q;
- netdev->real_num_tx_queues = tx_q;
+ netdev->num_tx_queues = adapter->drv_tx_rings;
+ netdev->real_num_tx_queues = adapter->drv_tx_rings;
- err = netif_set_real_num_tx_queues(netdev, tx_q);
+ err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- tx_q);
+ adapter->drv_tx_rings);
else
- dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+ dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
- int (*setup_intr) (struct qlcnic_adapter *, u8, int);
+ int (*setup_intr) (struct qlcnic_adapter *);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
- u8 num_intr, int txq)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
+ return adapter->ahw->hw_ops->setup_intr(adapter);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
/* When operating in a multi tx mode, driver needs to write 0x1
#include <linux/interrupt.h>
#include <linux/aer.h>
-#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
+ num_msix = adapter->drv_sds_rings;
+
/* account for AEN interrupt MSI-X based interrupts */
num_msix += 1;
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- num_msix += adapter->max_drv_tx_rings;
+ num_msix += adapter->drv_tx_rings;
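+ /* e.g. drv_sds_rings = 4 and drv_tx_rings = 1 (Tx interrupt not
+ * shared) requests 4 + 1 + 1 = 6 MSI-X vectors
+ */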
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
- num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
index = 2;
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
memset(&sds_mbx, 0, sds_mbx_size);
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
index = 0;
/* status descriptor ring */
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
sds = &recv_ctx->sds_rings[i];
sds->crb_sts_consumer = ahw->pci_base0 +
mbx_out->host_csmr[index];
struct qlcnic_hardware_context *ahw = adapter->ahw;
num_rds = adapter->max_rds_rings;
- if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
- num_sds = adapter->max_sds_rings;
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
else
- num_sds = QLCNIC_MAX_RING_SETS;
+ num_sds = QLCNIC_MAX_SDS_RINGS;
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
err = qlcnic_83xx_add_rings(adapter);
out:
qlcnic_free_mbx_args(&cmd);
mbx.size = tx->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- msix_vector = adapter->max_sds_rings + ring;
+ msix_vector = adapter->drv_sds_rings + ring;
else
- msix_vector = adapter->max_sds_rings - 1;
+ msix_vector = adapter->drv_sds_rings - 1;
msix_id = ahw->intr_tbl[msix_vector].id;
} else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command*/
tx->ctx_id = mbx_out->ctx_id;
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
}
static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
- int num_sds_ring)
+ u8 num_sds_ring)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
if (ret) {
qlcnic_detach(adapter);
if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
- adapter->max_sds_rings = num_sds_ring;
+ adapter->drv_sds_rings = num_sds_ring;
qlcnic_attach(adapter);
}
netif_device_attach(netdev);
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
}
static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
- int max_sds_rings)
+ u8 drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
}
}
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
if (qlcnic_attach(adapter))
goto out;
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ int ret = 0, loop = 0;
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
mode == QLCNIC_ILB_MODE ? "internal" : "external");
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_alloc;
qlcnic_83xx_clear_lb_mode(adapter, mode);
free_diag_res:
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_alloc:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
u32 data;
u16 intrpt_id, id;
- u8 val;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret;
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev, "Device is resetting\n");
}
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_irq;
done:
qlcnic_free_mbx_args(&cmd);
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
if (err)
return err;
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
qlcnic_83xx_set_vnic_opmode(adapter);
} else {
#define QLC_83XX_HOST_SDS_MBX_IDX 8
#define QLCNIC_HOST_RDS_MBX_IDX 88
-#define QLCNIC_MAX_RING_SETS 8
/* Pause control registers */
#define QLC_83XX_SRE_SHIM_REG 0x0D200284
u8 num_pci_func;
u8 state;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
struct qlcnic_add_rings_mbx_out {
u8 sts_num;
u8 rcv_num;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
/* Transmit context mailbox inbox registers
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
-#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
-#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
- if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
if (qlcnic_check_diag_status(adapter)) {
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
return QLC_83XX_DEFAULT_OPMODE;
if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
- return QLC_83XX_VIRTUAL_NIC_MODE;
+ return QLCNIC_VNIC_MODE;
return QLC_83XX_DEFAULT_OPMODE;
}
if (ret == -EIO)
return -EIO;
- if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_SINGLE_RING;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ adapter->max_sds_rings = ahw->max_rx_ques;
+ adapter->max_tx_rings = QLCNIC_SINGLE_RING;
} else {
return -EIO;
}
return err;
}
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ /* compute and set drv sds rings */
+ if (adapter->ahw->msix_supported)
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_DEF_SDS_RINGS);
+ else
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+}
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
int err = 0;
ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ qlcnic_83xx_init_rings(adapter);
+
err = qlcnic_83xx_init_mailbox_work(adapter);
if (err)
goto exit;
if (err)
goto detach_mbx;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto disable_intr;
if (err)
goto disable_mbx_intr;
+
/* Perform operating mode specific initialization */
err = adapter->nic_ops->init_driver(adapter);
if (err)
clear_bit(QLC_83XX_MBX_READY, &idc->status);
cancel_delayed_work_sync(&adapter->fw_work);
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
int err;
nrds_rings = adapter->max_rds_rings;
- nsds_rings = adapter->max_sds_rings;
+ nsds_rings = adapter->drv_sds_rings;
rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
nsds_rings);
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- temp_nsds_rings = adapter->max_sds_rings;
+ temp_nsds_rings = adapter->drv_sds_rings;
index = temp_nsds_rings + ring;
msix_id = ahw->intr_tbl[index].id;
prq->msi_index = cpu_to_le16(msix_id);
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
intr_mask = ahw->intr_tbl[index].src;
tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
if (err)
goto err_out;
- for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
err = qlcnic_fw_cmd_create_tx_ctx(dev,
&dev->tx_ring[ring],
ring);
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_del_rx_ctx(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->hw_consumer != NULL) {
dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
}
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ adapter->max_tx_rings = npar_info->max_tx_ques;
+ adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
{
- int ring_regs_cnt = (adapter->max_drv_tx_rings * 5) +
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
(adapter->max_rds_rings * 2) +
- (adapter->max_sds_rings * 3) + 5;
+ (adapter->drv_sds_rings * 3) + 5;
return ring_regs_cnt * sizeof(u32);
}
/* Marker btw regs and TX ring count */
regs_buff[i++] = 0xFFEFCDAB;
- regs_buff[i++] = adapter->max_drv_tx_rings; /* No. of TX ring */
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
regs_buff[i++] = tx_ring->sw_consumer;
regs_buff[i++] = rds_rings->producer;
}
- regs_buff[i++] = adapter->max_sds_rings; /* No. of SDS ring */
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
regs_buff[i++] = sds_ring->consumer;
return qlcnic_reset_context(adapter);
}
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (qlcnic_82xx_check(adapter) &&
+ (tx_ring > adapter->max_tx_rings)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+
+ if (qlcnic_83xx_check(adapter) &&
+ (tx_ring > QLCNIC_SINGLE_RING)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
+ tx_ring, QLCNIC_SINGLE_RING);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int min;
-
- min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
- channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
- channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->max_drv_tx_rings;
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
}
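+/* Reached via "ethtool -L <dev> rx <N> tx <N>" (ethtool_ops->set_channels) */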
static int qlcnic_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
+ struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
- int txq = 0;
if (channel->other_count || channel->combined_count)
return -EINVAL;
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
if (channel->rx_count) {
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
return err;
+ }
}
if (qlcnic_82xx_check(adapter) && channel->tx_count) {
- err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
return err;
- txq = channel->tx_count;
+ }
}
- err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
- netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
- adapter->max_sds_rings, adapter->max_drv_tx_rings);
+ err = qlcnic_setup_rings(adapter, channel->rx_count,
+ channel->tx_count);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
return err;
}
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
qlcnic_free_mbx_args(&cmd);
free_diag_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_drv_tx_rings = adapter->max_drv_tx_rings;
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
qlcnic_clear_lb_mode(adapter, mode);
free_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_it:
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_drv_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
break;
case ETH_SS_STATS:
num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
- for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
for (index = 0; index < num_stats; index++) {
sprintf(data, "tx_queue_%d %s", i,
qlcnic_tx_queue_stats_strings[index]);
struct qlcnic_host_tx_ring *tx_ring;
int ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
int index, ret, length, size, tx_size, ring;
char *p;
- tx_size = adapter->max_drv_tx_rings * QLCNIC_TX_STATS_LEN;
+ tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
memset(data, 0, tx_size * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(dev, max_sds_rings);
+ qlcnic_diag_free_res(dev, drv_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+/* HW Tx ring limits for 82xx adapters */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+
+/* Driver-wide Tx/SDS ring limits */
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
struct qlcnic_pci_info;
struct qlcnic_info;
struct qlcnic_cmd_args;
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
spin_lock_init(&rds_ring->lock);
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
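+
+ /* Serialize Tx completion processing against qlcnic_release_tx_buffers()
+ * in __qlcnic_down(); if the down path holds the lock, report the ring
+ * as done and skip this pass.
+ */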
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
return done;
}
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
- if (ring == (adapter->max_sds_rings - 1))
+ if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_poll,
NAPI_POLL_WEIGHT);
}
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
NAPI_POLL_WEIGHT);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
qlcnic_free_sds_rings(adapter->recv_ctx);
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_enable_tx_intr(adapter, tx_ring);
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
napi_synchronize(&sds_ring->napi);
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
qlcnic_check_multi_tx(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_disable_tx_int(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_disable_intr(adapter, sds_ring);
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
.io_resume = qlcnic_82xx_io_resume,
};
-static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int num_tx_q;
- if (ahw->msix_supported &&
+ if (qlcnic_82xx_check(adapter) &&
(ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
- num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
- num_online_cpus());
- if (num_tx_q > 1) {
- test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
- &adapter->state);
- adapter->max_drv_tx_rings = num_tx_q;
- }
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
} else {
- adapter->max_drv_tx_rings = 1;
+ return 1;
}
}
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
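+ /* e.g. with 6 online CPUs and max_rings = 8, num_rings becomes
+ * rounddown_pow_of_two(min(6, 8)) = 4
+ */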
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+ /* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
+ adapter->drv_tx_rings);
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+ /* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
+ adapter->drv_sds_rings);
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int max_tx_rings, max_sds_rings, tx_vector;
+ int drv_tx_rings, drv_sds_rings, tx_vector;
int err = -1, i;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- max_tx_rings = 0;
+ drv_tx_rings = 0;
tx_vector = 0;
} else {
- max_tx_rings = adapter->max_drv_tx_rings;
+ drv_tx_rings = adapter->drv_tx_rings;
tx_vector = 1;
}
return -ENOMEM;
}
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mail box and tx ring vectors */
- adapter->max_sds_rings = num_msix -
- max_tx_rings - 1;
+ adapter->drv_sds_rings = num_msix -
+ drv_tx_rings - 1;
} else {
adapter->ahw->num_msix = num_msix;
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1))
- max_sds_rings = num_msix - max_tx_rings;
+ (adapter->drv_tx_rings > 1))
+ drv_sds_rings = num_msix - drv_tx_rings;
else
- max_sds_rings = num_msix;
+ drv_sds_rings = num_msix;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
if (qlcnic_83xx_check(adapter)) {
if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (max_tx_rings + 1);
+ err -= drv_tx_rings + 1;
num_msix = rounddown_pow_of_two(err);
- num_msix += (max_tx_rings + 1);
+ num_msix += drv_tx_rings + 1;
} else {
num_msix = rounddown_pow_of_two(err);
if (qlcnic_check_multi_tx(adapter))
- num_msix += max_tx_rings;
+ num_msix += drv_tx_rings;
}
if (num_msix) {
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = adapter->drv_sds_rings;
- if (ahw->msix_supported) {
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
- if (qlcnic_check_multi_tx(adapter)) {
- if (txq)
- adapter->max_drv_tx_rings = txq;
- num_msix += adapter->max_drv_tx_rings;
- }
- } else {
- num_msix = 1;
- }
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
- /* Disable NPAR for 83XX */
- if (qlcnic_83xx_check(adapter))
- return err;
-
- if (adapter->ahw->capabilities & BIT_6)
+ if (adapter->ahw->capabilities & BIT_6) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
return err;
}
"HAL Version: %d, Privileged function\n",
adapter->ahw->fw_hal_version);
}
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
}
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- num_sds_rings = adapter->max_sds_rings;
+ num_sds_rings = adapter->drv_sds_rings;
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
snprintf(tx_ring->name, sizeof(tx_ring->name),
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
free_irq(sds_ring->irq, sds_ring);
}
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
(qlcnic_82xx_check(adapter) &&
qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->irq)
adapter->ahw->linkup = 0;
- if (adapter->max_sds_rings > 1)
+ if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
qlcnic_config_intr_coalesce(adapter);
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
qlcnic_reset_rx_buffers_list(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+ spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
adapter->is_up = 0;
}
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int max_tx_rings = adapter->max_drv_tx_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
qlcnic_detach(adapter);
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
if (qlcnic_attach(adapter))
goto out;
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
- adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
int ring;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
vfree(tx_ring->cmd_buf_arr);
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *cmd_buf_arr;
- tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ tx_ring = kcalloc(adapter->drv_tx_rings,
sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, ring);
if (qlcnic_83xx_check(adapter) ||
(qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
vector = adapter->msix_entries[index].vector;
tx_ring->irq = vector;
}
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
+ spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
goto err_out_maintenance_mode;
}
- qlcnic_get_multiq_capability(adapter);
-
- if ((adapter->ahw->act_pci_func > 2) &&
- qlcnic_check_multi_tx(adapter)) {
- adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
- dev_info(&adapter->pdev->dev,
- "vNIC mode enabled, Set max TX rings = %d\n",
- adapter->max_drv_tx_rings);
+ /* compute and set drv tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
}
- if (!qlcnic_check_multi_tx(adapter)) {
- clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
- }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
if (dcb && qlcnic_dcb_attach(dcb))
qlcnic_clear_dcb_ops(dcb);
-
} else if (qlcnic_83xx_check(adapter)) {
- adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
-
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netdev_info(netdev, "Tx ring=%d\n", ring);
netdev_info(netdev,
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_intr(adapter->irq, sds_ring);
}
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
return err;
}
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw = QLCNIC_MAX_TX_RINGS;
- u32 max_allowed;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
+
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ if (qlcnic_83xx_check(adapter))
+ max_hw_rings = QLCNIC_SINGLE_RING;
+ else
+ max_hw_rings = adapter->max_tx_rings;
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
- return -EINVAL;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
}
- if (!qlcnic_check_multi_tx(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
- if (txq > QLCNIC_MAX_TX_RINGS) {
- netdev_err(netdev, "Invalid ring count\n");
+ if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((txq > max_allowed) || !is_power_of_2(txq)) {
- if (!is_power_of_2(txq))
- netdev_err(netdev,
- "TX queue should be a power of 2\n");
- if (txq > num_online_cpus())
- netdev_err(netdev,
- "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
- num_online_cpus());
- netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ if (ring_cnt < 2) {
+ netdev_err(netdev,
+ "%s rings value should not be lower than 2\n", buf);
return -EINVAL;
}
- return 0;
-}
-
-int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
-{
- struct net_device *netdev = adapter->netdev;
- u8 max_hw = adapter->ahw->max_rx_ques;
- u32 max_allowed;
-
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
return -EINVAL;
}
- if (val > QLCNIC_MAX_SDS_RINGS) {
- netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLCNIC_MAX_SDS_RINGS);
- return -EINVAL;
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
- if (!is_power_of_2(val))
- netdev_err(netdev, "RSS value should be a power of 2\n");
-
- if (val < 2)
- netdev_err(netdev, "RSS value should not be lower than 2\n");
-
- if (val > max_hw)
- netdev_err(netdev,
- "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
- max_hw);
-
- if (val > num_online_cpus())
- netdev_err(netdev,
- "RSS value should not be higher than[%u], number of online CPUs in the system\n",
- num_online_cpus());
-
- netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
-
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
return -EINVAL;
}
+
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
{
- int err;
struct net_device *netdev = adapter->netdev;
- int num_msix;
+ int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
- return -EINVAL;
- }
-
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
- if (qlcnic_82xx_check(adapter)) {
- if (txq != 0)
- adapter->max_drv_tx_rings = txq;
-
- if (qlcnic_check_multi_tx(adapter) &&
- (txq > adapter->max_drv_tx_rings))
- num_msix = adapter->max_drv_tx_rings;
- else
- num_msix = data;
- }
-
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
- netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
-
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data, txq);
+ /* compute and set drv tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1, 0);
+ /* compute and set drv tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int err, max_sds_rings = adapter->max_sds_rings;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
u8 h_beacon_state, b_state, b_rate;
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
out:
if (!ahw->beacon_state)