[SCSI] lpfc 8.3.30: Fixed missing CVL event causing FCF failover
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / scsi / lpfc / lpfc_init.c
index 55bc4fc7376f5a753f08e2d8aa44b3846cc377e6..1afa50f54b8723823789f1109cf1ef6bc8db2a35 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/aer.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
+#include <linux/miscdevice.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -62,7 +63,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
 static void lpfc_free_sgl_list(struct lpfc_hba *);
 static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +475,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-       if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
-               && !(phba->lmt & LMT_1Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
-               && !(phba->lmt & LMT_2Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
-               && !(phba->lmt & LMT_4Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
-               && !(phba->lmt & LMT_8Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
-               && !(phba->lmt & LMT_10Gb))
-           || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
-               && !(phba->lmt & LMT_16Gb))) {
-               /* Reset link speed to auto */
-               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-                       "1302 Invalid speed for this board: "
-                       "Reset link speed to auto: x%x\n",
-                       phba->cfg_link_speed);
-                       phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
-       }
-
        phba->link_state = LPFC_LINK_DOWN;
 
        /* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +564,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
-               lpfc_init_link(phba, pmb, phba->cfg_topology,
-                       phba->cfg_link_speed);
-               pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-               lpfc_set_loopback_flag(phba);
-               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-               if (rc != MBX_SUCCESS) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0454 Adapter failed to init, mbxCmd x%x "
-                               "INIT_LINK, mbxStatus x%x\n",
-                               mb->mbxCommand, mb->mbxStatus);
-
-                       /* Clear all interrupt enable conditions */
-                       writel(0, phba->HCregaddr);
-                       readl(phba->HCregaddr); /* flush */
-                       /* Clear all pending interrupts */
-                       writel(0xffffffff, phba->HAregaddr);
-                       readl(phba->HAregaddr); /* flush */
-                       phba->link_state = LPFC_HBA_ERROR;
-                       if (rc != MBX_BUSY)
-                               mempool_free(pmb, phba->mbox_mem_pool);
-                       return -EIO;
-               }
+               mempool_free(pmb, phba->mbox_mem_pool);
+               rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+               if (rc)
+                       return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -667,6 +628,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
  **/
 int
 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+       return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *              0 - success
+ *              Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+                              uint32_t flag)
 {
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
@@ -681,9 +664,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
        mb = &pmb->u.mb;
        pmb->vport = vport;
 
-       lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+       if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+            !(phba->lmt & LMT_1Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+            !(phba->lmt & LMT_2Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+            !(phba->lmt & LMT_4Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+            !(phba->lmt & LMT_8Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+            !(phba->lmt & LMT_10Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+            !(phba->lmt & LMT_16Gb))) {
+               /* Reset link speed to auto */
+               lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                       "1302 Invalid speed for this board:%d "
+                       "Reset link speed to auto.\n",
+                       phba->cfg_link_speed);
+                       phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+       }
+       lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-       lpfc_set_loopback_flag(phba);
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1441,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        uint32_t event_data;
        struct Scsi_Host *shost;
        uint32_t if_type;
-       struct lpfc_register portstat_reg;
+       struct lpfc_register portstat_reg = {0};
+       uint32_t reg_err1, reg_err2;
+       uint32_t uerrlo_reg, uemasklo_reg;
+       uint32_t pci_rd_rc1, pci_rd_rc2;
        int rc;
 
        /* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1456,56 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        if (!phba->cfg_enable_hba_reset)
                return;
 
-       /* Send an internal error event to mgmt application */
-       lpfc_board_errevt_to_mgmt(phba);
-
-       /* For now, the actual action for SLI4 device handling is not
-        * specified yet, just treated it as adaptor hardware failure
-        */
-       event_data = FC_REG_DUMP_EVENT;
-       shost = lpfc_shost_from_vport(vport);
-       fc_host_post_vendor_event(shost, fc_get_event_number(),
-                                 sizeof(event_data), (char *) &event_data,
-                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
+               pci_rd_rc1 = lpfc_readl(
+                               phba->sli4_hba.u.if_type0.UERRLOregaddr,
+                               &uerrlo_reg);
+               pci_rd_rc2 = lpfc_readl(
+                               phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+                               &uemasklo_reg);
+               /* consider PCI bus read error as pci_channel_offline */
+               if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+                       return;
                lpfc_sli4_offline_eratt(phba);
                break;
        case LPFC_SLI_INTF_IF_TYPE_2:
-               portstat_reg.word0 =
-                       readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+               pci_rd_rc1 = lpfc_readl(
+                               phba->sli4_hba.u.if_type2.STATUSregaddr,
+                               &portstat_reg.word0);
+               /* consider PCI bus read error as pci_channel_offline */
+               if (pci_rd_rc1 == -EIO) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3151 PCI bus read access failure: x%x\n",
+                               readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+                       return;
+               }
+               reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+               reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
                        /* TODO: Register for Overtemp async events. */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2889 Port Overtemperature event, "
-                               "taking port\n");
+                               "taking port offline\n");
                        spin_lock_irq(&phba->hbalock);
                        phba->over_temp_state = HBA_OVER_TEMP;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli4_offline_eratt(phba);
-                       return;
+                       break;
                }
+               if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                   reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3143 Port Down: Firmware Restarted\n");
+               else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                        reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3144 Port Down: Debug Dump\n");
+               else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                        reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3145 Port Down: Provisioning\n");
                /*
                 * On error status condition, driver need to wait for port
                 * ready before performing reset.
@@ -1489,23 +1514,41 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                if (!rc) {
                        /* need reset: attempt for port recovery */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2887 Port Error: Attempting "
-                                       "Port Recovery\n");
+                                       "2887 Reset Needed: Attempting Port "
+                                       "Recovery...\n");
                        lpfc_offline_prep(phba);
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        if (lpfc_online(phba) == 0) {
                                lpfc_unblock_mgmt_io(phba);
-                               return;
+                               /* don't report event on forced debug dump */
+                               if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                                   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                                       return;
+                               else
+                                       break;
                        }
                        /* fall through for not able to recover */
                }
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3152 Unrecoverable error, bring the port "
+                               "offline\n");
                lpfc_sli4_offline_eratt(phba);
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                break;
        }
+       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                       "3123 Report dump event to upper layer\n");
+       /* Send an internal error event to mgmt application */
+       lpfc_board_errevt_to_mgmt(phba);
+
+       event_data = FC_REG_DUMP_EVENT;
+       shost = lpfc_shost_from_vport(vport);
+       fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                 sizeof(event_data), (char *) &event_data,
+                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -2298,13 +2341,20 @@ lpfc_cleanup(struct lpfc_vport *vport)
                        continue;
                }
 
+               /* take care of nodes in unused state before the state
+                * machine taking action.
+                */
+               if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+                       lpfc_nlp_put(ndlp);
+                       continue;
+               }
+
                if (ndlp->nlp_type & NLP_FABRIC)
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
 
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                             NLP_EVT_DEVICE_RM);
-
        }
 
        /* At this point, ALL ndlp's should be gone
@@ -2477,6 +2527,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
        }
 }
 
+/**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fix up the temporary RPI assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+       struct lpfc_nodelist  *ndlp, *next_ndlp;
+       struct lpfc_vport **vports;
+       int i;
+
+       if (phba->sli_rev != LPFC_SLI_REV4)
+               return;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       if (vports[i]->load_flag & FC_UNLOADING)
+                               continue;
+
+                       list_for_each_entry_safe(ndlp, next_ndlp,
+                                                &vports[i]->fc_nodes,
+                                                nlp_listp) {
+                               if (NLP_CHK_NODE_ACT(ndlp))
+                                       ndlp->nlp_rpi =
+                                               lpfc_sli4_alloc_rpi(phba);
+                       }
+               }
+       }
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
 /**
  * lpfc_online - Initialize and bring a HBA online
  * @phba: pointer to lpfc hba data structure.
@@ -2619,6 +2705,13 @@ lpfc_offline_prep(struct lpfc_hba * phba)
                                spin_lock_irq(shost->host_lock);
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                                spin_unlock_irq(shost->host_lock);
+                               /*
+                                * Whenever an SLI4 port goes offline, free the
+                                * RPI. Get a new RPI when the adapter port
+                                * comes back online.
+                                */
+                               if (phba->sli_rev == LPFC_SLI_REV4)
+                                       lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
                                lpfc_unreg_rpi(vports[i], ndlp);
                        }
                }
@@ -2673,6 +2766,36 @@ lpfc_offline(struct lpfc_hba *phba)
        lpfc_destroy_vport_work_array(phba, vports);
 }
 
+/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the SCSI buffers in the system and updates the
+ * physical XRIs assigned to each SCSI buffer, because these may change after
+ * any firmware reset.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+       struct lpfc_scsi_buf *sb, *sb_next;
+
+       spin_lock_irq(&phba->hbalock);
+       spin_lock(&phba->scsi_buf_list_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+               sb->cur_iocbq.sli4_xritag =
+                       phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+               set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+               phba->sli4_hba.max_cfg_param.xri_used++;
+               phba->sli4_hba.xri_count++;
+       }
+       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock_irq(&phba->hbalock);
+       return 0;
+}
+
 /**
  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
@@ -3602,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                break;
 
        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+               phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2549 FCF (x%x) disconnected from network, "
                        "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3663,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                }
                break;
        case LPFC_FIP_EVENT_TYPE_CVL:
+               phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2718 Clear Virtual Link Received for VPI 0x%x"
                        " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -4266,6 +4391,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
        int longs, sli_family;
+       int sges_per_segment;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -4329,6 +4455,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
+       /* With BlockGuard we can have multiple SGEs per Data Segment */
+       sges_per_segment = 1;
+       if (phba->cfg_enable_bg)
+               sges_per_segment = 2;
+
        /*
         * Since the sg_tablesize is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4337,7 +4468,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * sgl sizes of must be a power of 2.
         */
        buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-                   ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+                   (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
+                   sizeof(struct sli4_sge)));
 
        sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
        max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
@@ -4354,6 +4486,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        default:
                break;
        }
+
        for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
             dma_buf_size < max_buf_size && buf_size > dma_buf_size;
             dma_buf_size = dma_buf_size << 1)
@@ -5040,15 +5173,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
        struct lpfc_rpi_hdr *rpi_hdr;
 
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-       /*
-        * If the SLI4 port supports extents, posting the rpi header isn't
-        * required.  Set the expected maximum count and let the actual value
-        * get set when extents are fully allocated.
-        */
-       if (!phba->sli4_hba.rpi_hdrs_in_use) {
-               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
                return rc;
-       }
        if (phba->sli4_hba.extents_in_use)
                return -EIO;
 
@@ -5104,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
         * rpi is normalized to a zero base because the physical rpi is
         * port based.
         */
-       curr_rpi_range = phba->sli4_hba.next_rpi -
-               phba->sli4_hba.max_cfg_param.rpi_base;
+       curr_rpi_range = phba->sli4_hba.next_rpi;
        spin_unlock_irq(&phba->hbalock);
 
        /*
@@ -5942,7 +6067,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  *     -ENOMEM - No available memory
  *      -EIO - The mailbox failed to complete successfully.
  **/
-static int
+int
 lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6099,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                rc = -EIO;
        } else {
                rd_config = &pmb->u.mqe.un.rd_config;
+               if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+                       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+                       phba->sli4_hba.lnk_info.lnk_tp =
+                               bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+                       phba->sli4_hba.lnk_info.lnk_no =
+                               bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                                       "3081 lnk_type:%d, lnk_numb:%d\n",
+                                       phba->sli4_hba.lnk_info.lnk_tp,
+                                       phba->sli4_hba.lnk_info.lnk_no);
+               } else
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "3082 Mailbox (x%x) returned ldv:x0\n",
+                                       bf_get(lpfc_mqe_command, &pmb->u.mqe));
                phba->sli4_hba.extents_in_use =
                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
                phba->sli4_hba.max_cfg_param.max_xri =
@@ -6006,7 +6145,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
-               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
                phba->max_vports = phba->max_vpi;
@@ -6462,6 +6600,7 @@ out_free_fcp_wq:
                phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
        }
        kfree(phba->sli4_hba.fcp_wq);
+       phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
        lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
        phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6613,7 @@ out_free_fcp_cq:
                phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
        }
        kfree(phba->sli4_hba.fcp_cq);
+       phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
        lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
        phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6626,7 @@ out_free_fp_eq:
                phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
        }
        kfree(phba->sli4_hba.fp_eq);
+       phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
        lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
        phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6660,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        phba->sli4_hba.els_wq = NULL;
 
        /* Release FCP work queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+       if (phba->sli4_hba.fcp_wq != NULL)
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+                    fcp_qidx++)
+                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
        kfree(phba->sli4_hba.fcp_wq);
        phba->sli4_hba.fcp_wq = NULL;
 
@@ -6540,15 +6683,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
        /* Release FCP response complete queue */
        fcp_qidx = 0;
-       do
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-       while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       if (phba->sli4_hba.fcp_cq != NULL)
+               do
+                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+               while (++fcp_qidx < phba->cfg_fcp_eq_count);
        kfree(phba->sli4_hba.fcp_cq);
        phba->sli4_hba.fcp_cq = NULL;
 
        /* Release fast-path event queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-               lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+       if (phba->sli4_hba.fp_eq != NULL)
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++)
+                       lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
        kfree(phba->sli4_hba.fp_eq);
        phba->sli4_hba.fp_eq = NULL;
 
@@ -6601,11 +6747,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.sp_eq->queue_id);
 
        /* Set up fast-path event queue */
+       if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3147 Fast-path EQs not allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_sp_eq;
+       }
        for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
                if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
                                        "allocated\n", fcp_eqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fp_eq;
                }
                rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6783,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.mbx_cq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0528 Mailbox CQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fp_eq;
        }
        rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6803,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.els_cq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0530 ELS CQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_mbx_cq;
        }
        rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6820,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.sp_eq->queue_id);
 
        /* Set up fast-path FCP Response Complete Queue */
+       if (!phba->sli4_hba.fcp_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3148 Fast-path FCP CQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_els_cq;
+       }
        fcp_cqidx = 0;
        do {
                if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0526 Fast-path FCP CQ (%d) not "
                                        "allocated\n", fcp_cqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fcp_cq;
                }
                if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6872,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.mbx_wq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0538 Slow-path MQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fcp_cq;
        }
        rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6892,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.els_wq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0536 Slow-path ELS WQ not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_mbx_wq;
        }
        rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6909,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.els_cq->queue_id);
 
        /* Set up fast-path FCP Work Queue */
+       if (!phba->sli4_hba.fcp_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3149 Fast-path FCP WQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_els_wq;
+       }
        for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
                if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0534 Fast-path FCP WQ (%d) not "
                                        "allocated\n", fcp_wqidx);
+                       rc = -ENOMEM;
                        goto out_destroy_fcp_wq;
                }
                rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6952,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0540 Receive Queue not allocated\n");
+               rc = -ENOMEM;
                goto out_destroy_fcp_wq;
        }
 
@@ -6805,18 +6979,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
        for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
                lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
        for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
        for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
                lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
        return rc;
@@ -6853,13 +7030,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
        /* Unset ELS complete queue */
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
        /* Unset FCP response complete queue */
-       fcp_qidx = 0;
-       do {
-               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-       } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       if (phba->sli4_hba.fcp_cq) {
+               fcp_qidx = 0;
+               do {
+                       lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+               } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+       }
        /* Unset fast-path event queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-               lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+       if (phba->sli4_hba.fp_eq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++)
+                       lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+       }
        /* Unset slow-path event queue */
        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7111,19 +7293,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                                        rc = -ENODEV;
                                        goto out;
                                }
-                               if (bf_get(lpfc_sliport_status_rdy, &reg_data))
-                                       break;
-                               if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+                               if (bf_get(lpfc_sliport_status_rn, &reg_data))
                                        reset_again++;
+                               if (bf_get(lpfc_sliport_status_rdy, &reg_data))
                                        break;
-                               }
                        }
 
                        /*
                         * If the port responds to the init request with
                         * reset needed, delay for a bit and restart the loop.
                         */
-                       if (reset_again) {
+                       if (reset_again && (rdy_chk < 1000)) {
                                msleep(10);
                                reset_again = 0;
                                continue;
@@ -7398,22 +7578,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-       struct pci_dev *pdev;
-
-       /* Obtain PCI device reference */
-       if (!phba->pcidev)
-               return;
-       else
-               pdev = phba->pcidev;
-
-       /* Free coherent DMA memory allocated */
-
-       /* Unmap I/O memory space */
-       iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-       iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-       iounmap(phba->sli4_hba.conf_regs_memmap_p);
+       uint32_t if_type;
+       if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); /* BAR layout depends on SLI4 if_type */
 
-       return;
+       switch (if_type) {
+       case LPFC_SLI_INTF_IF_TYPE_0: /* if_type 0: doorbell, control and config BARs were all mapped */
+               iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+               iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+               iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_2: /* if_type 2: only the config register BAR was mapped */
+               iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_1: /* if_type 1 is not supported by this driver */
+       default:
+               dev_printk(KERN_ERR, &phba->pcidev->dev,
+                          "FATAL - unsupported SLI4 interface type - %d\n",
+                          if_type);
+               break;
+       }
 }
 
 /**
@@ -7997,6 +8180,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(shost->host_lock);
 
+       kfree(phba->vpi_bmask);
+       kfree(phba->vpi_ids);
+
        lpfc_stop_hba_timers(phba);
 
        phba->pport->work_port_events = 0;
@@ -8529,6 +8715,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
        /* Final cleanup of txcmplq and reset the HBA */
        lpfc_sli_brdrestart(phba);
 
+       kfree(phba->vpi_bmask);
+       kfree(phba->vpi_ids);
+
        lpfc_stop_hba_timers(phba);
        spin_lock_irq(&phba->hbalock);
        list_del_init(&vport->listentry);
@@ -8943,7 +9132,7 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 int
 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
 {
-       char fwrev[32];
+       char fwrev[FW_REV_STR_SIZE];
        struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
        struct list_head dma_buffer_list;
        int i, rc = 0;
@@ -9198,12 +9387,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);
 
-       /* check for firmware upgrade or downgrade */
-       snprintf(file_name, 16, "%s.grp", phba->ModelName);
-       error = request_firmware(&fw, file_name, &phba->pcidev->dev);
-       if (!error) {
-               lpfc_write_firmware(phba, fw);
-               release_firmware(fw);
+       /* check for firmware upgrade or downgrade (if_type 2 only) */
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+           LPFC_SLI_INTF_IF_TYPE_2) {
+               snprintf(file_name, 16, "%s.grp", phba->ModelName);
+               error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+               if (!error) {
+                       lpfc_write_firmware(phba, fw);
+                       release_firmware(fw);
+               }
        }
 
        /* Check if there are static vports to be created. */
@@ -9894,6 +10086,36 @@ lpfc_io_resume(struct pci_dev *pdev)
        return;
 }
 
+/**
+ * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine puts a reference count on the lpfc module whenever the
+ * character device is opened.  Always returns 0 (open cannot fail).
+ **/
+static int
+lpfc_mgmt_open(struct inode *inode, struct file *filep)
+{
+       try_module_get(THIS_MODULE); /* NOTE(review): result ignored; open still succeeds if the module is unloading */
+       return 0;
+}
+
+/**
+ * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine removes a reference count from the lpfc module when the
+ * character device is closed.  Always returns 0.
+ **/
+static int
+lpfc_mgmt_release(struct inode *inode, struct file *filep)
+{
+       module_put(THIS_MODULE); /* drop the reference taken in lpfc_mgmt_open() */
+       return 0;
+}
+
 static struct pci_device_id lpfc_id_table[] = {
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
                PCI_ANY_ID, PCI_ANY_ID, },
@@ -10006,6 +10228,17 @@ static struct pci_driver lpfc_driver = {
        .err_handler    = &lpfc_err_handler,
 };
 
+static const struct file_operations lpfc_mgmt_fop = { /* ops for the "lpfcmgmt" char device: open/close only */
+       .open = lpfc_mgmt_open,         /* pins the module via try_module_get() */
+       .release = lpfc_mgmt_release,   /* drops the module reference */
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+       .minor = MISC_DYNAMIC_MINOR,    /* let the misc core assign a minor number */
+       .name = "lpfcmgmt",             /* device node name (typically /dev/lpfcmgmt) */
+       .fops = &lpfc_mgmt_fop,
+};
+
 /**
  * lpfc_init - lpfc module initialization routine
  *
@@ -10026,6 +10259,11 @@ lpfc_init(void)
        printk(LPFC_MODULE_DESC "\n");
        printk(LPFC_COPYRIGHT "\n");
 
+       error = misc_register(&lpfc_mgmt_dev);
+       if (error)
+               printk(KERN_ERR "Could not register lpfcmgmt device, "
+                       "misc_register returned with status %d", error);
+
        if (lpfc_enable_npiv) {
                lpfc_transport_functions.vport_create = lpfc_vport_create;
                lpfc_transport_functions.vport_delete = lpfc_vport_delete;
@@ -10062,6 +10300,7 @@ lpfc_init(void)
 static void __exit
 lpfc_exit(void)
 {
+       misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        fc_release_transport(lpfc_transport_template);
        if (lpfc_enable_npiv)