Merge branch 'misc' into for-linus
author    James Bottomley <JBottomley@Parallels.com>
          Fri, 10 May 2013 14:53:40 +0000 (07:53 -0700)
committer James Bottomley <JBottomley@Parallels.com>
          Fri, 10 May 2013 14:53:40 +0000 (07:53 -0700)
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
98 files changed:
drivers/scsi/Kconfig
drivers/scsi/aic94xx/aic94xx_dev.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/csiostor/csio_lnode.h
drivers/scsi/csiostor/csio_rnode.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_fip.h [new file with mode: 0644]
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/fnic/vnic_dev.h
drivers/scsi/fnic/vnic_devcmd.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_device.h
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/lpfc/lpfc_vport.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/pm8001/Makefile
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c [new file with mode: 0644]
drivers/scsi/pm8001/pm80xx_hwi.h [new file with mode: 0644]
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufshcd-pltfrm.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
include/linux/pci_ids.h
include/scsi/libsas.h
include/scsi/osd_protocol.h
include/scsi/sas.h
include/scsi/sas_ata.h
include/scsi/scsi_transport_iscsi.h
include/scsi/scsi_transport_sas.h

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index db95c547c09d6781d68dca561abe230c5d0f57ce..86af29f53bbebefec7d58ca0c595c3852e6fdf79 100644 (file)
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        select SCSI_FC_ATTRS
+       select GENERIC_CSUM
+       select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 64136c56e7065052bf24d0aa53e500f69f3957d0..33072388ea166a0cce3a2019782b4f64e5a91e33 100644 (file)
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
-       if (dev->dev_type == SATA_PM_PORT)
+       if (dev->dev_type == SAS_SATA_PM_PORT)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
        else if (dev->tproto)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
        u32 qdepth = 0;
 
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
                if (ata_id_has_ncq(ata_dev->id))
                        qdepth = ata_id_queue_depth(ata_dev->id);
                asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
        asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-           dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+           dev->dev_type == SAS_SATA_PM_PORT) {
                struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
                        dev->frame_rcvd;
                asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
        if (dev->port->oob_mode != SATA_OOB_MODE) {
                flags |= OPEN_REQUIRED;
-               if ((dev->dev_type == SATA_DEV) ||
+               if ((dev->dev_type == SAS_SATA_DEV) ||
                    (dev->tproto & SAS_PROTOCOL_STP)) {
                        struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
                        if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
                } else {
                        flags |= CONCURRENT_CONN_SUPP;
                        if (!dev->parent &&
-                           (dev->dev_type == EDGE_DEV ||
-                            dev->dev_type == FANOUT_DEV))
+                           (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                            dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
                                asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
                                                       4);
                        else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                        asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
                }
        }
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                flags |= SATA_MULTIPORT;
        asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
 
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
        asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                i = asd_init_sata(dev);
                if (i < 0) {
                        asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                }
        }
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
                if (rdev->I_T_nexus_loss_timeout > 0)
                        asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
 
        spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
        switch (dev->dev_type) {
-       case SATA_PM:
+       case SAS_SATA_PM:
                res = asd_init_sata_pm_ddb(dev);
                break;
-       case SATA_PM_PORT:
+       case SAS_SATA_PM_PORT:
                res = asd_init_sata_pm_port_ddb(dev);
                break;
        default:
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 81b736c76fffab7bf40be168ff1ae3fb2388faad..4df867e07b2022df773ce0f7f0115e2b7bf4a5e7 100644 (file)
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
 
        memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
 
-       phy->identify_frame->dev_type = SAS_END_DEV;
+       phy->identify_frame->dev_type = SAS_END_DEVICE;
        if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
                phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
        if (phy->sas_phy.role & PHY_ROLE_TARGET)
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index cf9040933da6497ca500454274fe163c50ec324d..d4c35df3d4ae600f9e5d0d7a02ed547f4c8ff905 100644 (file)
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
        struct sas_phy *phy = sas_get_local_phy(dev);
        /* Standard mandates link reset for ATA  (type 0) and
         * hard reset for SSP (type 1) */
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 
        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index f1733dfa3ae24489b44b3f6bee92568589c7ade1..777e7c0bbb4b327d6624a2d771a78fa5033d432c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5c87768c109c0fa9a8fc0dda6f8bcb6c357ca083..e66aa7c11a8a21e041f8956c6ed0f758401c8420 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *ioctl_hdr;
+       struct be_cmd_resp_hdr *ioctl_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
        if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            ioctl_hdr->subsystem,
                            ioctl_hdr->opcode,
                            status, addl_status);
+
+               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+                       if (ioctl_resp_hdr->response_length)
+                               goto release_mcc_tag;
+               }
                rc = -EAGAIN;
        }
 
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+       struct be_cmd_resp_hdr *resp_hdr;
 
        be_dws_le_to_cpu(compl, 4);
 
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                            hdr->subsystem, hdr->opcode,
                            compl_status, extd_status);
 
+               if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+                       if (resp_hdr->response_length)
+                               return 0;
+               }
                return -EBUSY;
        }
        return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
 {
-       switch (evt->port_link_status) {
-       case ASYNC_EVENT_LINK_DOWN:
+       if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+           ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+            (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+               phba->state = BE_ADAPTER_LINK_DOWN;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link Down on Physical Port %d\n",
+                           "BC_%d : Link Down on Port %d\n",
                            evt->physical_port);
 
-               phba->state |= BE_ADAPTER_LINK_DOWN;
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
-               break;
-       case ASYNC_EVENT_LINK_UP:
+       } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_UP;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link UP on Physical Port %d\n",
-                           evt->physical_port);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Unexpected Async Notification %d on"
-                           "Physical Port %d\n",
-                           evt->port_link_status,
+                           "BC_%d : Link UP on Port %d\n",
                            evt->physical_port);
        }
 }
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int wait = 0;
+       uint32_t wait = 0;
        u32 ready;
 
        do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        struct be_mcc_compl *compl = &mbox->compl;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        return status;
 }
 
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        return status;
 }
 
+/**
+ * be_cmd_fw_uninit()- Uinitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       int status;
+       u8 *endian_check;
+
+       spin_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       endian_check = (u8 *) wrb;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xAA;
+       *endian_check++ = 0xBB;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xCC;
+       *endian_check++ = 0xDD;
+       *endian_check = 0xFF;
+
+       be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : be_cmd_fw_uninit Failed\n");
+
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-       if (chip_skh_r(ctrl->pdev)) {
-               req->hdr.version = MBX_CMD_VER2;
-               req->page_size = 1;
-               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-                             ctxt, coalesce_wm);
-               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-                             ctxt, no_delay);
-               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                             __ilog2_u32(cq->len / 256));
-               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                              PCI_FUNC(ctrl->pdev->devfn));
+       } else {
+               req->hdr.version = MBX_CMD_VER2;
+               req->page_size = 1;
+               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                             ctxt, coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+                             ctxt, no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+                             __ilog2_u32(cq->len / 256));
+               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        void *ctxt = &req->context;
        int status;
 
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-                     1);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-                     PCI_FUNC(ctrl->pdev->devfn));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-                     be_encoded_q_len(length / sizeof(struct phys_addr)));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-                     ctxt, entry_size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-                     cq->id);
+
+       if (is_chip_be2_be3r(phba)) {
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             cq_id_recv, ctxt, cq->id);
+       } else {
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             cq_id_recv, ctxt, cq->id);
+       }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 23397d51ac54f5f9550f15b3b4571ed8342ee762..99073086dfe06522621e57b4e8a8e2d968f7c1c7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0       /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
 
 enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
-       ASYNC_EVENT_LINK_UP = 0x1
+       ASYNC_EVENT_LINK_UP = 0x1,
+       ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
        u8 port_link_status;
        u8 port_duplex;
        u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE    0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL   0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE  0x02
        u8 port_fault;
        u8 rsvd0[7];
        struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                        uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
        u8 rsvd4[32];           /* dword 3 */
 } __packed;
 
+struct amap_default_pdu_context_ext {
+       u8 rsvd0[16];   /* dword 0 */
+       u8 ring_size[4];    /* dword 0 */
+       u8 rsvd1[12];   /* dword 0 */
+       u8 rsvd2[22];   /* dword 1 */
+       u8 rx_pdid[9];  /* dword 1 */
+       u8 rx_pdid_valid;   /* dword 1 */
+       u8 default_buffer_size[16]; /* dword 2 */
+       u8 cq_id_recv[16];  /* dword 2 */
+       u8 rsvd3[32];   /* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
  * stack to notify the
  * controller of a posted Work Request Block
  */
-#define DB_WRB_POST_CID_MASK           0x3FF   /* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK           0xFFFF  /* bits 0 - 16 */
 #define DB_DEF_PDU_WRB_INDEX_MASK      0xFF    /* bits 0 - 9 */
 
 #define DB_DEF_PDU_WRB_INDEX_SHIFT     16
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 9014690fe841f08e8993bcfd2153cba785f6135e..ef36be003f67a49f0d37cf1ed1136c87894bd942 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
                                struct beiscsi_conn *beiscsi_conn,
                                unsigned int cid)
 {
-       if (phba->conn_table[cid]) {
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index]) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table already occupied. Detected clash\n");
 
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
        } else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                           cid, beiscsi_conn);
+                           cri_index, beiscsi_conn);
 
-               phba->conn_table[cid] = beiscsi_conn;
+               phba->conn_table[cri_index] = beiscsi_conn;
        }
        return 0;
 }
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
        struct beiscsi_hba *phba = beiscsi_ep->phba;
+       struct beiscsi_conn *beiscsi_conn;
 
        beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
        beiscsi_ep->phba = NULL;
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = NULL;
+
+       /**
+        * Check if any connection resource allocated by driver
+        * is to be freed.This case occurs when target redirection
+        * or connection retry is done.
+        **/
+       if (!beiscsi_ep->conn)
+               return;
+
+       beiscsi_conn = beiscsi_ep->conn;
+       if (beiscsi_conn->login_in_progress) {
+               beiscsi_free_mgmt_task_handles(beiscsi_conn,
+                                              beiscsi_conn->task);
+               beiscsi_conn->login_in_progress = 0;
+       }
 }
 
 /**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
        struct beiscsi_hba *phba = beiscsi_ep->phba;
-       struct be_mcc_wrb *wrb;
        struct tcp_connect_and_offload_out *ptcpcnct_out;
        struct be_dma_mem nonemb_cmd;
        unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                    "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
                    beiscsi_ep->ep_cid);
 
-       phba->ep_array[beiscsi_ep->ep_cid -
-                      phba->fw_config.iscsi_cid_start] = ep;
-       if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
-                                 phba->params.cxns_per_ctrl * 2)) {
-
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Failed in allocate iscsi cid\n");
-               goto free_ep;
-       }
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = ep;
 
        beiscsi_ep->cid_vld = 0;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : Failed to allocate memory for"
                            " mgmt_open_connection\n");
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+               beiscsi_free_ep(beiscsi_ep);
                return -ENOMEM;
        }
        nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-       if (!tag) {
+       if (tag <= 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
+               beiscsi_free_ep(beiscsi_ep);
                return -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
-               goto free_ep;
+               beiscsi_free_ep(beiscsi_ep);
+               return -EBUSY;
        }
 
-       ptcpcnct_out = embedded_payload(wrb);
+       ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
        beiscsi_ep = ep->dd_data;
        beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
        beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return 0;
-
-free_ep:
-       beiscsi_free_ep(beiscsi_ep);
-       return -EBUSY;
 }
 
 /**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
+       if (beiscsi_error(phba)) {
+               ret = -EIO;
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BS_%d : The FW state Not Stable!!!\n");
+               return ERR_PTR(ret);
+       }
+
        if (phba->state != BE_ADAPTER_UP) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
                                      unsigned int cid)
 {
-       if (phba->conn_table[cid])
-               phba->conn_table[cid] = NULL;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index])
+               phba->conn_table[cri_index] = NULL;
        else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table Not occupied.\n");
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 38eab7232159dd7ec412e798a3a521cc8bab862f..31ddc84943989b94eaa4682b91d1a8461c7d066c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4e2733d2300365761e7dba5df79e3a1b606c67e2..d24a2867bc21cb885506f9d35d0fbe397e27400a 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        &dev_attr_beiscsi_adapter_family,
+       &dev_attr_beiscsi_fw_ver,
+       &dev_attr_beiscsi_active_cid_count,
        NULL,
 };
 
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
                                    + BE2_TMFS
                                    + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
+       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
-                         unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[cid];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
        hdr->t2retain = 0;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
-                         csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
-       struct iscsi_conn *conn = beiscsi_conn->conn;
-       struct iscsi_session *session = conn->session;
-       uint16_t wrb_index, cid;
+       uint16_t wrb_index, cid, cri_index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       if (chip_skh_r(phba->pcidev)) {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                    cid, psol);
        } else {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                    cid, psol);
        }
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
        task = pwrb_handle->pio_handle;
 
        io_task = task->dd_data;
-       spin_lock_bh(&phba->mgmt_sgl_lock);
-       free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
-       spin_lock_bh(&session->lock);
-       free_wrb_handle(phba, pwrb_context, pwrb_handle);
-       spin_unlock_bh(&session->lock);
+       memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+       iscsi_put_task(task);
 }
 
 static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = csol_cqe->i_flags;
        hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
-       hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
-                        csol_cqe->cmd_wnd - 1);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                struct sol_cqe *psol,
                struct common_sol_cqe *csol_cqe)
 {
-       if (chip_skh_r(phba->pcidev)) {
+       if (is_chip_be2_be3r(phba)) {
+               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   i_exp_cmd_sn, psol);
+               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_res_cnt, psol);
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_cmd_wnd, psol);
+               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   wrb_index, psol);
+               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+                                             cid, psol);
+               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                hw_sts, psol);
+               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                i_resp, psol);
+               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                               i_sts, psol);
+               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_flags, psol);
+       } else {
                csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                    i_exp_cmd_sn, psol);
                csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                                              cid, psol);
                csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                 hw_sts, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                  i_cmd_wnd, psol);
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  o, psol))
                        csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
-       } else {
-               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   i_exp_cmd_sn, psol);
-               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_res_cnt, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_cmd_wnd, psol);
-               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   wrb_index, psol);
-               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
-                                             cid, psol);
-               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                hw_sts, psol);
-               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                i_resp, psol);
-               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                               i_sts, psol);
-               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_flags, psol);
        }
 }
 
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct common_sol_cqe csol_cqe = {0};
+       uint16_t cri_index = 0;
 
        phwi_ctrlr = phba->phwi_ctrlr;
 
        /* Copy the elements to a common structure */
        adapter_get_sol_cqe(phba, psol, &csol_cqe);
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        pwrb_handle = pwrb_context->pwrb_handle_basestd[
                      csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
        unsigned char is_header = 0;
        unsigned int index, dpl;
 
-       if (chip_skh_r(phba->pcidev)) {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                      index, pdpdu_cqe);
        } else {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                      index, pdpdu_cqe);
        }
 
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
        WARN_ON(!pasync_handle);
 
-       pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
-                                            phba->fw_config.iscsi_cid_start;
+       pasync_handle->cri =
+                       BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = dpl;
        *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
        }
 
        status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-                                          (beiscsi_conn->beiscsi_conn_cid -
-                                           phba->fw_config.iscsi_cid_start),
                                            phdr, hdr_len, pfirst_buffer,
                                            offset);
 
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
        unsigned int num_processed = 0;
        unsigned int tot_nump = 0;
        unsigned short code = 0, cid = 0;
+       uint16_t cri_index = 0;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         32] & CQE_CODE_MASK);
 
                 /* Get the CID */
-               if (chip_skh_r(phba->pcidev)) {
+               if (is_chip_be2_be3r(phba)) {
+                       cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               } else {
                        if ((code == DRIVERMSG_NOTIFY) ||
                            (code == UNSOL_HDR_NOTIFY) ||
                            (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         else
                                 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                     cid, sol);
-                  } else
-                        cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               }
 
-               ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+               cri_index = BE_GET_CRI_FROM_CID(cid);
+               ep = phba->ep_array[cri_index];
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
 
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 
 static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
-       static unsigned int ret;
+       unsigned int ret;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
 
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                /* Check for the data_count */
                dsp_value = (task->data_count) ? 1 : 0;
 
-               if (chip_skh_r(phba->pcidev))
-                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+               if (is_chip_be2_be3r(phba))
+                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
                                      pwrb, dsp_value);
                else
-                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
                                      pwrb, dsp_value);
 
                /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 {
-       struct be_mem_descriptor *mem_descr;
        dma_addr_t bus_add;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_mem_descriptor *mem_descr;
        struct mem_array *mem_arr, *mem_arr_orig;
        unsigned int i, j, alloc_size, curr_alloc_size;
 
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
        if (!phba->phwi_ctrlr)
                return -ENOMEM;
 
+       /* Allocate memory for wrb_context */
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+                                         phba->params.cxns_per_ctrl,
+                                         GFP_KERNEL);
+       if (!phwi_ctrlr->wrb_context)
+               return -ENOMEM;
+
        phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
                                 GFP_KERNEL);
        if (!phba->init_mem) {
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
                               GFP_KERNEL);
        if (!mem_arr_orig) {
                kfree(phba->init_mem);
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2628,6 +2640,7 @@ free_mem:
        }
        kfree(mem_arr_orig);
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
        return -ENOMEM;
 }
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
        struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+       struct hwi_context_memory *phwi_ctxt;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        mem_descr_wrb += HWI_MEM_WRB;
        phwi_ctrlr = phba->phwi_ctrlr;
 
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       /* Allocate memory for WRBQ */
+       phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+       phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+                                    phba->fw_config.iscsi_cid_count,
+                                    GFP_KERNEL);
+       if (!phwi_ctxt->be_wrbq) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : WRBQ Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                pwrb_context->pwrb_handle_base =
                                kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                }
        }
        idx = 0;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                if (!num_cxn_wrb) {
                        pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
        return -ENOMEM;
 }
 
-static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
        memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
+       pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
+                                         phba->fw_config.iscsi_cid_count,
+                                         GFP_KERNEL);
+       if (!pasync_ctx->async_entry) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
        pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
        pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx->async_header.ep_read_ptr = -1;
        pasync_ctx->async_data.host_write_ptr = 0;
        pasync_ctx->async_data.ep_read_ptr = -1;
+
+       return 0;
 }
 
 static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
        void *wrb_vaddr;
        struct be_dma_mem sgl;
        struct be_mem_descriptor *mem_descr;
+       struct hwi_wrb_context *pwrb_context;
        int status;
 
        idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        kfree(pwrb_arr);
                        return status;
                }
-               phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
-                                                                  id;
+               pwrb_context = &phwi_ctrlr->wrb_context[i];
+               pwrb_context->cid = phwi_context->be_wrbq[i].id;
+               BE_SET_CID_TO_CRI(i, pwrb_context->cid);
        }
        kfree(pwrb_arr);
        return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
        struct hwi_wrb_context *pwrb_context;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                kfree(pwrb_context->pwrb_handle_base);
                kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
+       struct hwi_async_pdu_context *pasync_ctx;
        int i, eq_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
        }
+       kfree(phwi_context->be_wrbq);
        free_wrb_handles(phba);
 
        q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
        be_mcc_queues_destroy(phba);
+
+       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+       kfree(pasync_ctx->async_entry);
+       be_cmd_fw_uninit(ctrl);
 }
 
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
        if (beiscsi_init_wrb_handle(phba))
                return -ENOMEM;
 
-       hwi_init_async_pdu_ctx(phba);
+       if (hwi_init_async_pdu_ctx(phba)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx failed\n");
+               return -ENOMEM;
+       }
+
        if (hwi_init_port(phba) != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
                mem_descr++;
        }
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
 }
 
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-       int i, new_cid;
+       int i;
 
        phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
                                  GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                return -ENOMEM;
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-                                phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+                                phba->params.cxns_per_ctrl, GFP_KERNEL);
        if (!phba->ep_array) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                            "hba_setup_cid_tbls\n");
                kfree(phba->cid_array);
+               phba->cid_array = NULL;
                return -ENOMEM;
        }
-       new_cid = phba->fw_config.iscsi_cid_start;
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               phba->cid_array[i] = new_cid;
-               new_cid += 2;
+
+       phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+                                  phba->params.cxns_per_ctrl, GFP_KERNEL);
+       if (!phba->conn_table) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in"
+                           "hba_setup_cid_tbls\n");
+
+               kfree(phba->cid_array);
+               kfree(phba->ep_array);
+               phba->cid_array = NULL;
+               phba->ep_array = NULL;
+               return -ENOMEM;
        }
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++)
+               phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+
        phba->avlbl_cids = phba->params.cxns_per_ctrl;
        return 0;
 }
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
        kfree(phba->eh_sgl_hndl_base);
        kfree(phba->cid_array);
        kfree(phba->ep_array);
+       kfree(phba->conn_table);
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources bound to the CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                               struct iscsi_task *task)
+{
+       struct beiscsi_io_task *io_task;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                               beiscsi_conn->beiscsi_conn_cid);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+       io_task = task->dd_data;
+
+       if (io_task->pwrb_handle) {
+               memset(io_task->pwrb_handle->pwrb, 0,
+                      sizeof(struct iscsi_wrb));
+               free_wrb_handle(phba, pwrb_context,
+                               io_task->pwrb_handle);
+               io_task->pwrb_handle = NULL;
+       }
+
+       if (io_task->psgl_handle) {
+               spin_lock_bh(&phba->mgmt_sgl_lock);
+               free_mgmt_sgl_handle(phba,
+                                    io_task->psgl_handle);
+               io_task->psgl_handle = NULL;
+               spin_unlock_bh(&phba->mgmt_sgl_lock);
+       }
+
+       if (io_task->mtask_addr)
+               pci_unmap_single(phba->pcidev,
+                                io_task->mtask_addr,
+                                io_task->mtask_data_count,
+                                PCI_DMA_TODEVICE);
 }
 
 /**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-                       - phba->fw_config.iscsi_cid_start];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        if (io_task->cmd_bhs) {
                pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                        io_task->psgl_handle = NULL;
                }
        } else {
-               if (!beiscsi_conn->login_in_progress) {
-                       if (io_task->pwrb_handle) {
-                               free_wrb_handle(phba, pwrb_context,
-                                               io_task->pwrb_handle);
-                               io_task->pwrb_handle = NULL;
-                       }
-                       if (io_task->psgl_handle) {
-                               spin_lock(&phba->mgmt_sgl_lock);
-                               free_mgmt_sgl_handle(phba,
-                                                    io_task->psgl_handle);
-                               spin_unlock(&phba->mgmt_sgl_lock);
-                               io_task->psgl_handle = NULL;
-                       }
-                       if (io_task->mtask_addr) {
-                               pci_unmap_single(phba->pcidev,
-                                                io_task->mtask_addr,
-                                                io_task->mtask_data_count,
-                                                PCI_DMA_TODEVICE);
-                               io_task->mtask_addr = 0;
-                       }
-               }
+               if (!beiscsi_conn->login_in_progress)
+                       beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
        }
 }
 
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        beiscsi_cleanup_task(task);
        spin_unlock_bh(&session->lock);
 
-       pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
-                                      phba->fw_config.iscsi_cid_start));
+       pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
        /* Check for the adapter family */
-       if (chip_skh_r(phba->pcidev))
-               beiscsi_offload_cxn_v2(params, pwrb_handle);
-       else
+       if (is_chip_be2_be3r(phba))
                beiscsi_offload_cxn_v0(params, pwrb_handle,
                                       phba->init_mem);
+       else
+               beiscsi_offload_cxn_v2(params, pwrb_handle);
 
        be_dws_le_to_cpu(pwrb_handle->pwrb,
                         sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        itt_t itt;
+       uint16_t cri_index = 0;
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        dma_addr_t paddr;
 
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        goto free_hndls;
                }
                io_task->pwrb_handle = alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                if (!io_task->pwrb_handle) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        } else {
                io_task->scsi_cmnd = NULL;
                if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+                       beiscsi_conn->task = task;
                        if (!beiscsi_conn->login_in_progress) {
                                spin_lock(&phba->mgmt_sgl_lock);
                                io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                                        io_task->psgl_handle;
                                io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                                if (!io_task->pwrb_handle) {
                                        beiscsi_log(phba, KERN_ERR,
                                                    BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                io_task->pwrb_handle =
                                                beiscsi_conn->plogin_wrb_handle;
                        }
-                       beiscsi_conn->task = task;
                } else {
                        spin_lock(&phba->mgmt_sgl_lock);
                        io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        }
                        io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                        if (!io_task->pwrb_handle) {
                                beiscsi_log(phba, KERN_ERR,
                                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ free_io_hndls:
 free_mgmt_hndls:
        spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+       io_task->psgl_handle = NULL;
        spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       beiscsi_conn->beiscsi_conn_cid -
-                       phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(
+                               beiscsi_conn->beiscsi_conn_cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (io_task->pwrb_handle)
                free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
        io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
        unsigned int doorbell = 0;
 
        pwrb = io_task->pwrb_handle->pwrb;
-       memset(pwrb, 0, sizeof(*pwrb));
 
        io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        pwrb = io_task->pwrb_handle->pwrb;
        memset(pwrb, 0, sizeof(*pwrb));
 
-       if (chip_skh_r(phba->pcidev)) {
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
-                             be32_to_cpu(task->cmdsn));
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
-                             io_task->pwrb_handle->wrb_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
-                             io_task->psgl_handle->sgl_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
-                             task->data_count);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
-                             io_task->pwrb_handle->nxt_wrb_index);
-               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
                              be32_to_cpu(task->cmdsn));
                AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
                AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
                              io_task->pwrb_handle->nxt_wrb_index);
                pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+       } else {
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+                             be32_to_cpu(task->cmdsn));
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+                             io_task->pwrb_handle->wrb_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+                             io_task->psgl_handle->sgl_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+                             task->data_count);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+                             io_task->pwrb_handle->nxt_wrb_index);
+               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
        }
 
 
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
        case ISCSI_OP_NOOP_OUT:
                if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
                        ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 1);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 1);
                } else {
                        ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 0);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 0);
                }
                hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
        }
 
        /* Set the task type */
-       io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
-               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
-               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
+       io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
 
        doorbell |= cid & DB_WRB_POST_CID_MASK;
        doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        case OC_SKH_ID1:
                phba->generation = BE_GEN4;
                phba->iotask_fn = beiscsi_iotask_v2;
+               break;
        default:
                phba->generation = 0;
        }
index 5946577d79d6579f6f4fa4ff8c8eaf156caf9ac5..2c06ef3c02aca49974f6bee2487f09b98c2b1d4d 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.0.272.0"
+#define BUILD_STR              "10.0.467.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
-#define OC_SKH_MAX_NUM_CPUS    63
+#define OC_SKH_MAX_NUM_CPUS    31
 
+#define BEISCSI_VER_STRLEN 32
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
@@ -265,7 +266,9 @@ struct invalidate_command_table {
        unsigned short cid;
 } __packed;
 
-#define chip_skh_r(pdev)       (pdev->device == OC_SKH_ID1)
+#define chip_be2(phba)      (phba->generation == BE_GEN2)
+#define chip_be3_r(phba)    (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
 struct beiscsi_hba {
        struct hba_parameters params;
        struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@ struct beiscsi_hba {
        unsigned short avlbl_cids;
        unsigned short cid_alloc;
        unsigned short cid_free;
-       struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
        struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+                         (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+       unsigned short cid_to_cri_map[BE_MAX_SESSION];
        unsigned short *cid_array;
        struct iscsi_endpoint **ep_array;
+       struct beiscsi_conn **conn_table;
        struct iscsi_boot_kset *boot_kset;
        struct Scsi_Host *shost;
        struct iscsi_iface *ipv4_iface;
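
Elsewhere in this patch the old "cid - iscsi_cid_start" arithmetic is replaced
by lookups through the two macros above. A stand-alone model of that mapping
(the lowercase helpers mirror BE_SET_CID_TO_CRI()/BE_GET_CRI_FROM_CID(); the
values used in main() are made up):

#include <stdio.h>

#define MAX_SESSION 2048        /* mirrors BE_MAX_SESSION */

/* cid_to_cri_map[] lets a CID observed at runtime be mapped back to the
 * index of the wrb_context that owns it. */
static unsigned short cid_to_cri_map[MAX_SESSION];

static void set_cid_to_cri(unsigned short cri_index, unsigned short cid)
{
        cid_to_cri_map[cid] = cri_index;
}

static unsigned short get_cri_from_cid(unsigned short cid)
{
        return cid_to_cri_map[cid];
}

int main(void)
{
        set_cid_to_cri(3, 517);                 /* wrb_context[3] owns CID 517 */
        printf("%u\n", get_cri_from_cid(517));  /* prints 3 */
        return 0;
}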
@@ -339,6 +347,7 @@ struct beiscsi_hba {
        struct delayed_work beiscsi_hw_check_task;
 
        u8 mac_address[ETH_ALEN];
+       char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
        struct workqueue_struct *wq;    /* The actual work queue */
        struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
         * This is a varying size list! Do not add anything
         * after this entry!!
         */
-       struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
+       struct hwi_async_entry *async_entry;
 };
 
 #define PDUCQE_CODE_MASK       0x0000003F
@@ -749,6 +758,8 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
 void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                                    struct iscsi_task *task);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -933,7 +944,7 @@ struct hwi_controller {
        struct sgl_handle *psgl_handle_base;
        unsigned int wrb_mem_index;
 
-       struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+       struct hwi_wrb_context *wrb_context;
        struct mcc_wrb *pmcc_wrb_base;
        struct be_ring default_pdu_hdr;
        struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
        struct be_queue_info be_def_hdrq;
        struct be_queue_info be_def_dataq;
 
-       struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
-       struct be_mcc_wrb_context *pbe_mcc_context;
-
+       struct be_queue_info *be_wrbq;
        struct hwi_async_pdu_context *pasync_ctx;
 };
 
index 55cc9902263dd84bc047fadb429ea9a45ababa50..245a9595a93a131449fd660a7b7d8bd0e9e7f3b5 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                            "BM_%d : phba->fw_config.iscsi_features = %d\n",
                            phba->fw_config.iscsi_features);
+               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+                      firmware_version_string, BEISCSI_VER_STRLEN);
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d :  Failed in mgmt_check_supported_fw\n");
@@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
 }
 
+/**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains the formatted firmware version string
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_cid_disp()- Display the number of active sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains the formatted active session count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                      (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+}
+
 /**
  * beiscsi_adap_family_disp()- Display adapter family.
  * @dev: ptr to device to get priv structure
index 2e4968add799f7e91ab53e38edd21cd5e74eaa44..04af7e74fe4884a54a773f0471b264412de726d7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
 } __packed;
 
 struct mgmt_hba_attributes {
-       u8 flashrom_version_string[32];
-       u8 manufacturer_name[32];
+       u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+       u8 manufacturer_name[BEISCSI_VER_STRLEN];
        u32 supported_modes;
        u8 seeprom_version_lo;
        u8 seeprom_version_hi;
        u8 rsvd0[2];
        u32 fw_cmd_data_struct_version;
        u32 ep_fw_data_struct_version;
-       u32 future_reserved[12];
+       u8 ncsi_version_string[12];
        u32 default_extended_timeout;
-       u8 controller_model_number[32];
+       u8 controller_model_number[BEISCSI_VER_STRLEN];
        u8 controller_description[64];
-       u8 controller_serial_number[32];
-       u8 ip_version_string[32];
-       u8 firmware_version_string[32];
-       u8 bios_version_string[32];
-       u8 redboot_version_string[32];
-       u8 driver_version_string[32];
-       u8 fw_on_flash_version_string[32];
+       u8 controller_serial_number[BEISCSI_VER_STRLEN];
+       u8 ip_version_string[BEISCSI_VER_STRLEN];
+       u8 firmware_version_string[BEISCSI_VER_STRLEN];
+       u8 bios_version_string[BEISCSI_VER_STRLEN];
+       u8 redboot_version_string[BEISCSI_VER_STRLEN];
+       u8 driver_version_string[BEISCSI_VER_STRLEN];
+       u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
        u32 functionalities_supported;
        u16 max_cdblength;
        u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
        u32 firmware_post_status;
        u32 hba_mtu[8];
        u8 iscsi_features;
-       u8 future_u8[3];
+       u8 asic_generation;
+       u8 future_u8[2];
        u32 future_u32[3];
 } __packed;
 
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
        u64 unique_identifier;
        u8 netfilters;
        u8 rsvd0[3];
-       u8 future_u32[4];
+       u32 future_u32[4];
 } __packed;
 
 struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
                               struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+                            struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_cid_disp(struct device *dev,
+                                struct device_attribute *attr, char *buf);
+
 ssize_t beiscsi_adap_family_disp(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 
index 11596b2c4702ae952c7063ab93494a0d5f9bb59f..08b22a901c2590e3aca12ebcfe4484e143c54460 100644 (file)
@@ -2,7 +2,7 @@
 #define _BNX2FC_H_
 /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.13"
+#define BNX2FC_VERSION         "1.0.14"
 
 #define PFX                    "bnx2fc: "
 
+#define BCM_CHIP_LEN           16
+
 #define BNX2X_DOORBELL_PCI_BAR         2
 
 #define BNX2FC_MAX_BD_LEN              0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
        int wait_for_link_down;
        int num_ofld_sess;
        struct list_head vports;
+
+       char chip_num[BCM_CHIP_LEN];
 };
 
 struct bnx2fc_interface {
index bdbbb13b8534c2318464b1b9a803c554af2b17ac..b1c9a4f8caee852f2cbf16b3735a7f7d912bb014 100644 (file)
@@ -3,7 +3,7 @@
  * This file contains helper routines that handle ELS requests
  * and responses.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7dffec1e57158469328e670df896767227261ae7..69ac55495c1d7479d75f79b13fdb22659875b95c 100644 (file)
@@ -3,7 +3,7 @@
  * cnic modules to create FCoE instances, send/receive non-offloaded
  * FIP/FCoE packets, listen to link events etc.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Dec 21, 2012"
+#define DRV_MODULE_RELDATE     "Mar 08, 2013"
 
 
 static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
+       struct bnx2fc_hba *hba = interface->hba;
        struct Scsi_Host *shost = lport->host;
        int rc = 0;
 
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        }
        if (!lport->vport)
                fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
-       sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
-               BNX2FC_NAME, BNX2FC_VERSION,
+       snprintf(fc_host_symbolic_name(lport->host), 256,
+                "%s (Broadcom %s) v%s over %s",
+               BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
                interface->netdev->name);
 
        return 0;
@@ -1656,23 +1658,60 @@ mem_err:
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
 {
        struct cnic_dev *cnic;
+       struct pci_dev *pdev;
 
        if (!hba->cnic) {
                printk(KERN_ERR PFX "cnic is NULL\n");
                return -ENODEV;
        }
        cnic = hba->cnic;
-       hba->pcidev = cnic->pcidev;
-       if (hba->pcidev)
-               pci_dev_get(hba->pcidev);
+       pdev = hba->pcidev = cnic->pcidev;
+       if (!hba->pcidev)
+               return -ENODEV;
 
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_NX2_57710:
+               strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57711:
+               strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57712:
+       case PCI_DEVICE_ID_NX2_57712_MF:
+       case PCI_DEVICE_ID_NX2_57712_VF:
+               strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57800:
+       case PCI_DEVICE_ID_NX2_57800_MF:
+       case PCI_DEVICE_ID_NX2_57800_VF:
+               strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57810:
+       case PCI_DEVICE_ID_NX2_57810_MF:
+       case PCI_DEVICE_ID_NX2_57810_VF:
+               strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57840:
+       case PCI_DEVICE_ID_NX2_57840_MF:
+       case PCI_DEVICE_ID_NX2_57840_VF:
+       case PCI_DEVICE_ID_NX2_57840_2_20:
+       case PCI_DEVICE_ID_NX2_57840_4_10:
+               strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+               break;
+       default:
+               pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+               break;
+       }
+       pci_dev_get(hba->pcidev);
        return 0;
 }
 
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 {
-       if (hba->pcidev)
+       if (hba->pcidev) {
+               hba->chip_num[0] = '\0';
                pci_dev_put(hba->pcidev);
+       }
        hba->pcidev = NULL;
 }
 
index 50510ffe1bf59444b2dcf28cd8c1345b41b3269a..c0d035a8f8f9e79a08a0b313742c2f7b0009ca6f 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains the code that low level functions that interact
  * with 57712 FCoE firmware.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;
 
-       fcoe_init3.perf_config = 1;
+       /*
+        * Enable both cached connections and cached tasks:
+        * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+        */
+       fcoe_init3.perf_config = 3;
 
        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
index 723a9a8ba5ee28f48de2e67854205f5749287cee..575142e92d9c5219b10fb286d23a12ee1580db58 100644 (file)
@@ -1,7 +1,7 @@
 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
  * IO manager and SCSI IO processing.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 
        spin_lock_bh(&tgt->tgt_lock);
        io_req->wait_for_comp = 0;
-       if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
-                                   &io_req->req_flags))) {
+       if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+               BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+               rc = SUCCESS;
+       } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+                                     &io_req->req_flags))) {
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
index c57a3bb8a9fbfe1944e796a67df7cf81efec23c8..4d93177dfb530c4446d959bd97fb71cd5dee86d1 100644 (file)
@@ -2,7 +2,7 @@
  * Handles operations such as session offload/upload etc, and manages
  * session resources such as connection id and qp resources.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 0f9c04175b11d4894f34157bf8177d6956c2f9c7..372a67d122d38fc161743c105db229c534d26a89 100644 (file)
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
        uint32_t        n_rnode_match;  /* matched rnode */
        uint32_t        n_dev_loss_tmo; /* Device loss timeout */
        uint32_t        n_fdmi_err;     /* fdmi err */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT];   /* State m/c events */
        uint32_t        n_rnode_alloc;  /* rnode allocated */
        uint32_t        n_rnode_free;   /* rnode freed */
index 65940096a80d8426eb438392bf544ecee1d5a1e1..43343422122202c9614d4b76539c5a5d20e2fab6 100644 (file)
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
        uint32_t        n_err_nomem;    /* error nomem */
        uint32_t        n_evt_unexp;    /* unexpected event */
        uint32_t        n_evt_drop;     /* unexpected event */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT];  /* State m/c events */
        uint32_t        n_lun_rst;      /* Number of resets of
                                         * of LUNs under this
index 98436c3630359f7e348cb496ed4a719575675e2d..b6d1f92ed33cc17e67ff4cbe1b3c424f5f75bd0e 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.2"
+#define DRV_VERSION            "1.5.0.22"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -192,6 +192,18 @@ enum fnic_state {
 
 struct mempool;
 
+enum fnic_evt {
+       FNIC_EVT_START_VLAN_DISC = 1,
+       FNIC_EVT_START_FCF_DISC = 2,
+       FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+       struct list_head list;
+       struct fnic *fnic;
+       enum fnic_evt event;
+};
+
 /* Per-instance private data structure */
 struct fnic {
        struct fc_lport *lport;
@@ -254,6 +266,18 @@ struct fnic {
        struct sk_buff_head frame_queue;
        struct sk_buff_head tx_queue;
 
+       /*** FIP related data members  -- start ***/
+       void (*set_vlan)(struct fnic *, u16 vlan);
+       struct work_struct      fip_frame_work;
+       struct sk_buff_head     fip_frame_queue;
+       struct timer_list       fip_timer;
+       struct list_head        vlans;
+       spinlock_t              vlans_lock;
+
+       struct work_struct      event_work;
+       struct list_head        evlist;
+       /*** FIP related data members  -- end ***/
+
        /* copy work queue cache line section */
        ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
        /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
 }
 
 extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
 extern struct device_attribute *fnic_attrs[];
 
 void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
 
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 {
index 483eb9dbe66366377c5dd4f049c15a160ca2a500..006fa92a02df0b4178f2ce88c73dffd8635cb5fc 100644 (file)
 #include <scsi/libfc.h>
 #include "fnic_io.h"
 #include "fnic.h"
+#include "fnic_fip.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
+static u8 fcoe_all_fcfs[ETH_ALEN];
+struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
 static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
 
 void fnic_handle_link(struct work_struct *work)
 {
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
+                               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                                       /* start FCoE VLAN discovery */
+                                       fnic_fcoe_send_vlan_req(fnic);
+                                       return;
+                               }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                       /* start FCoE VLAN discovery */
+                       fnic_fcoe_send_vlan_req(fnic);
+                       return;
+               }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
        }
 }
 
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               list_del(&fevt->list);
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, event_work);
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               if (fnic->stop_rx_link_events) {
+                       list_del(&fevt->list);
+                       kfree(fevt);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+
+               list_del(&fevt->list);
+               switch (fevt->event) {
+               case FNIC_EVT_START_VLAN_DISC:
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       fnic_fcoe_send_vlan_req(fnic);
+                       spin_lock_irqsave(&fnic->fnic_lock, flags);
+                       break;
+               case FNIC_EVT_START_FCF_DISC:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Start FCF Discovery\n");
+                       fnic_fcoe_start_fcf_disc(fnic);
+                       break;
+               default:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Unknown event 0x%x\n", fevt->event);
+                       break;
+               }
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * is_fnic_fip_flogi_reject() - Check if the received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the FIP-encapsulated FLOGI response is an ELS
+ * reject (LS_RJT), zero otherwise.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+                                        struct sk_buff *skb)
+{
+       struct fc_lport *lport = fip->lp;
+       struct fip_header *fiph;
+       struct fc_frame_header *fh = NULL;
+       struct fip_desc *desc;
+       struct fip_encaps *els;
+       enum fip_desc_type els_dtype = 0;
+       u16 op;
+       u8 els_op;
+       u8 sub;
+
+       size_t els_len = 0;
+       size_t rlen;
+       size_t dlen = 0;
+
+       if (skb_linearize(skb))
+               return 0;
+
+       if (skb->len < sizeof(*fiph))
+               return 0;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (op != FIP_OP_LS)
+               return 0;
+
+       if (sub != FIP_SC_REP)
+               return 0;
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       if (rlen + sizeof(*fiph) > skb->len)
+               return 0;
+
+       desc = (struct fip_desc *)(fiph + 1);
+       dlen = desc->fip_dlen * FIP_BPW;
+
+       if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+               shost_printk(KERN_DEBUG, lport->host,
+                         " FIP TYPE FLOGI: fab name:%llx "
+                         "vfid:%d map:%x\n",
+                         fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
+                         fip->sel_fcf->fc_map);
+               if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+                       return 0;
+
+               els_len = dlen - sizeof(*els);
+               els = (struct fip_encaps *)desc;
+               fh = (struct fc_frame_header *)(els + 1);
+               els_dtype = desc->fip_dtype;
+
+               if (!fh)
+                       return 0;
+
+               /*
+                * The encapsulated ELS command code should be a reject
+                * (LS_RJT), with reason "unsupported command" and
+                * explanation "insufficient resources".
+                */
+               els_op = *(u8 *)(fh + 1);
+               if (els_op == ELS_LS_RJT) {
+                       shost_printk(KERN_INFO, lport->host,
+                                 "Flogi Request Rejected by Switch\n");
+                       return 1;
+               }
+               shost_printk(KERN_INFO, lport->host,
+                               "Flogi Request Accepted by Switch\n");
+       }
+       return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct sk_buff *skb;
+       char *eth_fr;
+       int fr_len;
+       struct fip_vlan *vlan;
+       u64 vlan_tov;
+
+       fnic_fcoe_reset_vlans(fnic);
+       fnic->set_vlan(fnic, 0);
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Sending VLAN request...\n");
+       skb = dev_alloc_skb(sizeof(struct fip_vlan));
+       if (!skb)
+               return;
+
+       fr_len = sizeof(*vlan);
+       eth_fr = (char *)skb->data;
+       vlan = (struct fip_vlan *)eth_fr;
+
+       memset(vlan, 0, sizeof(*vlan));
+       memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+       memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+       vlan->eth.h_proto = htons(ETH_P_FIP);
+
+       vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+       vlan->fip.fip_op = htons(FIP_OP_VLAN);
+       vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+       vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+       vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+       vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+       memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+       vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+       vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+       put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+       skb_put(skb, sizeof(*vlan));
+       skb->protocol = htons(ETH_P_FIP);
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       fip->send(fip, skb);
+
+       /* set a timer so that we can retry if there is no response */
+       vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+       mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct fip_header *fiph;
+       struct fip_desc *desc;
+       u16 vid;
+       size_t rlen;
+       size_t dlen;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+       unsigned long flags;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response...\n");
+
+       fiph = (struct fip_header *) skb->data;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+                 ntohs(fiph->fip_op), fiph->fip_subcode);
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       fnic_fcoe_reset_vlans(fnic);
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       desc = (struct fip_desc *)(fiph + 1);
+       while (rlen > 0) {
+               dlen = desc->fip_dlen * FIP_BPW;
+               switch (desc->fip_dtype) {
+               case FIP_DT_VLAN:
+                       vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "process_vlan_resp: FIP VLAN %d\n", vid);
+                       vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
+                       if (!vlan) {
+                               /* retry from timer */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               goto out;
+                       }
+                       memset(vlan, 0, sizeof(struct fcoe_vlan));
+                       vlan->vid = vid & 0x0fff;
+                       vlan->state = FIP_VLAN_AVAIL;
+                       list_add_tail(&vlan->list, &fnic->vlans);
+                       break;
+               }
+               desc = (struct fip_desc *)((char *)desc + dlen);
+               rlen -= dlen;
+       }
+
+       /* any VLAN descriptors present ? */
+       if (list_empty(&fnic->vlans)) {
+               /* retry from timer */
+               FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                         "No VLAN descriptors in FIP VLAN response\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               goto out;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count++;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(fip);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+       return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count = 1;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(&fnic->ctlr);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+       unsigned long flags;
+       struct fcoe_vlan *fvlan;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return -EINVAL;
+       }
+
+       fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       if (fvlan->state == FIP_VLAN_USED) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+
+       if (fvlan->state == FIP_VLAN_SENT) {
+               fvlan->state = FIP_VLAN_USED;
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+       return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+       struct fnic_event *fevt;
+       unsigned long flags;
+
+       fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+       if (!fevt)
+               return;
+
+       fevt->fnic = fnic;
+       fevt->event = ev;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       list_add_tail(&fevt->list, &fnic->evlist);
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fip_header *fiph;
+       int ret = 1;
+       u16 op;
+       u8 sub;
+
+       if (!skb || !(skb->data))
+               return -1;
+
+       if (skb_linearize(skb))
+               goto drop;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+               goto drop;
+
+       if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+               goto drop;
+
+       if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+               if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+                       goto drop;
+               /* pass it on to fcoe */
+               ret = 1;
+       } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+               /* set the vlan as used */
+               fnic_fcoe_process_vlan_resp(fnic, skb);
+               ret = 0;
+       } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+               /* received CVL request, restart vlan disc */
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               /* pass it on to fcoe */
+               ret = 1;
+       }
+drop:
+       return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+       unsigned long flags;
+       struct sk_buff *skb;
+       struct ethhdr *eh;
+
+       while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+               spin_lock_irqsave(&fnic->fnic_lock, flags);
+               if (fnic->stop_rx_link_events) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       dev_kfree_skb(skb);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       skb_queue_head(&fnic->fip_frame_queue, skb);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               eh = (struct ethhdr *)skb->data;
+               if (eh->h_proto == htons(ETH_P_FIP)) {
+                       skb_pull(skb, sizeof(*eh));
+                       if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       /*
+                        * If there are FLOGI rejects - clear all
+                        * fcf's & restart from scratch
+                        */
+                       if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "Trigger a Link down - VLAN Disc\n");
+                               fcoe_ctlr_link_down(&fnic->ctlr);
+                               /* start FCoE VLAN discovery */
+                               fnic_fcoe_send_vlan_req(fnic);
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       fcoe_ctlr_recv(&fnic->ctlr, skb);
+                       continue;
+               }
+       }
+}
+
 /**
  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  * @fnic:      fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
-               skb_pull(skb, sizeof(*eh));
-               fcoe_ctlr_recv(&fnic->ctlr, skb);
+               if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+                       printk(KERN_ERR "Dropped FIP frame, as firmware "
+                                       "uses non-FIP mode, enable FIP "
+                                       "using UCSM\n");
+                       goto drop;
+               }
+               skb_queue_tail(&fnic->fip_frame_queue, skb);
+               queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
 }
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       struct fcoe_vlan *next;
+
+       /*
+        * Indicate a link down to fcoe so that all fcf's are freed;
+        * this might not be required since we already did it before
+        * sending the vlan discovery request.
+        */
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (!list_empty(&fnic->vlans)) {
+               list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+                       list_del(&vlan->list);
+                       kfree(vlan);
+               }
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->stop_rx_link_events) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+               return;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               /* no vlans available, try again */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               return;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       shost_printk(KERN_DEBUG, fnic->lport->host,
+                 "fip_timer: vlan %d state %d sol_count %d\n",
+                 vlan->vid, vlan->state, vlan->sol_count);
+       switch (vlan->state) {
+       case FIP_VLAN_USED:
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "FIP VLAN is selected for FC transaction\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               break;
+       case FIP_VLAN_FAILED:
+               /* if all vlans are in failed state, restart vlan disc */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               break;
+       case FIP_VLAN_SENT:
+               if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+                       /*
+                        * no response on this vlan, remove it from the list.
+                        * Try the next vlan
+                        */
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "Dequeue this VLAN ID %d from list\n",
+                                 vlan->vid);
+                       list_del(&vlan->list);
+                       kfree(vlan);
+                       vlan = NULL;
+                       if (list_empty(&fnic->vlans)) {
+                               /* we exhausted all vlans, restart vlan disc */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "fip_timer: vlan list empty, "
+                                         "trigger vlan disc\n");
+                               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+                               return;
+                       }
+                       /* check the next vlan */
+                       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+                                                       list);
+                       fnic->set_vlan(fnic, vlan->vid);
+                       vlan->state = FIP_VLAN_SENT; /* sent now */
+               }
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               vlan->sol_count++;
+               sol_time = jiffies + msecs_to_jiffies
+                                       (FCOE_CTLR_START_DELAY);
+               mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+               break;
+       }
+}
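As a rough worked example of the retry budget implied by fnic_handle_fip_timer() above (using the constants introduced in fnic_fip.h below, and assuming the timer is the only thing advancing sol_count): a VLAN left in FIP_VLAN_SENT is re-solicited every FCOE_CTLR_START_DELAY milliseconds until sol_count reaches FCOE_CTLR_MAX_SOL, so a non-responsive VLAN is dropped after roughly

    FCOE_CTLR_MAX_SOL * FCOE_CTLR_START_DELAY = 8 * 2000 ms = 16 s

before the driver moves on to the next VLAN or restarts VLAN discovery.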
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644 (file)
index 0000000..87e74c2
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY    2000    /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV    2000    /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL        8
+
+#define FINC_MAX_FLOGI_REJECTS   8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+       struct fip_desc fd_desc;
+       __be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+       __be16 vid;
+       __be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+       struct list_head list;
+       u16 vid;                /* vlan ID */
+       u16 sol_count;          /* no. of sols sent */
+       u16 state;              /* state */
+};
+
+enum fip_vlan_state {
+       FIP_VLAN_AVAIL  = 0,    /* available, not yet solicited */
+       FIP_VLAN_SENT   = 1,    /* solicitation sent */
+       FIP_VLAN_USED   = 2,    /* solicitation succeeded, vlan in use */
+       FIP_VLAN_FAILED = 3,    /* no response, solicitation failed */
+};
+
+struct fip_vlan {
+       struct ethhdr eth;
+       struct fip_header fip;
+       struct {
+               struct fip_mac_desc mac;
+               struct fip_wwn_desc wwnn;
+       } desc;
+};
+
+#endif  /* _FNIC_FIP_H_ */
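The fcoe_vlan entries above live on the fnic->vlans list protected by fnic->vlans_lock, as used by fnic_fcoe_reset_vlans() and fnic_handle_fip_timer() earlier in this diff. A minimal sketch of how a discovered VLAN might be queued (illustrative only; example_add_vlan is a hypothetical helper, not part of the patch, and an atomic allocation context is assumed):

/* Sketch only: queue a discovered VLAN for solicitation.
 * Assumes the fnic->vlans list and fnic->vlans_lock spinlock used
 * elsewhere in this series.
 */
static void example_add_vlan(struct fnic *fnic, u16 vid)
{
	struct fcoe_vlan *vlan;
	unsigned long flags;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);	/* may run with IRQs off */
	if (!vlan)
		return;
	vlan->vid = vid;
	vlan->state = FIP_VLAN_AVAIL;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	list_add_tail(&vlan->list, &fnic->vlans);
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}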
index d601ac543c52bd1f24b50a760c240ab2c374f62c..5f09d1814d2685e86b06d9c6d697659d06cade07 100644 (file)
@@ -39,6 +39,7 @@
 #include "vnic_intr.h"
 #include "vnic_stats.h"
 #include "fnic_io.h"
+#include "fnic_fip.h"
 #include "fnic.h"
 
 #define PCI_DEVICE_ID_CISCO_FNIC       0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
+static void fnic_fip_notify_timer(unsigned long data)
+{
+       struct fnic *fnic = (struct fnic *)data;
+
+       fnic_handle_fip_timer(fnic);
+}
+
 static void fnic_notify_timer_start(struct fnic *fnic)
 {
        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
        return fnic->data_src_addr;
 }
 
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+       /* the previous default vlan returned by firmware is not needed here */
+       vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
                vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+               fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+                                                       (unsigned long)fnic);
+               spin_lock_init(&fnic->vlans_lock);
+               INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+               INIT_WORK(&fnic->event_work, fnic_handle_event);
+               skb_queue_head_init(&fnic->fip_frame_queue);
+               spin_lock_irqsave(&fnic_list_lock, flags);
+               if (!fnic_fip_queue) {
+                       fnic_fip_queue =
+                               create_singlethread_workqueue("fnic_fip_q");
+                       if (!fnic_fip_queue) {
+                               spin_unlock_irqrestore(&fnic_list_lock, flags);
+                               printk(KERN_ERR PFX "fnic FIP work queue "
+                                                "create failed\n");
+                               err = -ENOMEM;
+                               goto err_out_free_max_pool;
+                       }
+               }
+               spin_unlock_irqrestore(&fnic_list_lock, flags);
+               INIT_LIST_HEAD(&fnic->evlist);
+               INIT_LIST_HEAD(&fnic->vlans);
        } else {
                shost_printk(KERN_INFO, fnic->lport->host,
                             "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+               del_timer_sync(&fnic->fip_timer);
+               skb_queue_purge(&fnic->fip_frame_queue);
+               fnic_fcoe_reset_vlans(fnic);
+               fnic_fcoe_evlist_free(fnic);
+       }
+
        /*
         * Log off the fabric. This stops all remote ports, dns port,
         * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN,
-                NULL);
+                 SLAB_HWCACHE_ALIGN,
+                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
                err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
 {
        pci_unregister_driver(&fnic_driver);
        destroy_workqueue(fnic_event_queue);
+       if (fnic_fip_queue) {
+               flush_workqueue(fnic_fip_queue);
+               destroy_workqueue(fnic_fip_queue);
+       }
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
index b576be734e2e04a342cd031f75c146096bc1c2f5..9795d6f3e1974e47e98f6d28ef80f1e4f282e528 100644 (file)
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
        return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+       u64 a0 = new_default_vlan, a1 = 0;
+       int wait = 1000;
+       int old_vlan = 0;
+
+       old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+       return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
        if (vdev->linkstatus)
index f9935a8a5a0932e14b4f896627ebe59567f8a1d9..40d4195f562bad4f40d406d3e28e48a018b6c213 100644 (file)
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+                               u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
index 7c9ccbd4134b722dad20643314fa719fdcffb89c..3e2fcbda6aedad54315aac57e07664df171306db 100644 (file)
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
 
        /* undo initialize of virtual link */
        CMD_DEINIT              = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+       /* check fw capability of a cmd:
+        * in:  (u32)a0=cmd
+        * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+       CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+       /* persistent binding info
+        * in:  (u64)a0=paddr of arg
+        *      (u32)a1=CMD_PERBI_XXX */
+       CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+       /* Interrupt Assert Register functionality
+        * in: (u16)a0=interrupt number to assert
+        */
+       CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+       /* initiate hangreset, like softreset after hang detected */
+       CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+       /* hangreset status:
+        *    out: a0=0 reset complete, a0=1 reset in progress */
+       CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+       /*
+        * Set hw ingress packet vlan rewrite mode:
+        * in:  (u32)a0=new vlan rewrite mode
+        * out: (u32)a0=old vlan rewrite mode */
+       CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+       /*
+        * in:  (u16)a0=bdf of target vnic
+        *      (u32)a1=cmd to proxy
+        *      a2-a15=args to cmd in a1
+        * out: (u32)a0=status of proxied cmd
+        *      a1-a15=out args of proxied cmd */
+       CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+       /*
+        * Same as CMD_PROXY_BY_BDF, except a0 is the index of the
+        * hvnlink subordinate vnic or SR-IOV virtual vnic
+        */
+       CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+       /*
+        * For HPP toggle:
+        * adapter-info-get
+        * in:  (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=size of buffer specified in a0.
+        * out: (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+        *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+       CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+       /*
+        * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+        *            (u32)a1=INT13_CMD_xxx
+        */
+       CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+       /*
+        * Set default vlan:
+        * in: (u16)a0=new default vlan
+        *     (u16)a1=zero for overriding vlan with param a0,
+        *             non-zero for resetting vlan to the default
+        * out: (u16)a0=old default vlan
+        */
+       CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
 
 /* flags for CMD_OPEN */
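The new vnic_dev_set_default_vlan() helper in vnic_dev.c always passes a1 == 0, i.e. it overrides the default VLAN with a0. Per the CMD_SET_DEFAULT_VLAN description above, restoring the firmware default would use a non-zero a1; a sketch (not part of this patch, mirroring the helper's calling convention):

/* Sketch only: reset the default vlan back to the firmware default by
 * passing a1 != 0, per the CMD_SET_DEFAULT_VLAN comment above.
 * example_restore_default_vlan is hypothetical.
 */
static u16 example_restore_default_vlan(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 1;	/* non-zero a1: restore firmware default */
	int wait = 1000;
	int old_vlan;

	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}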
index cc82d0f322b6cb30fd3fdaf1fd5e49665302dece..4e31caa21ddfba379036b6a7b4c085a5f916549f 100644 (file)
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                return 0;
        }
 
-       if (vhost->state == IBMVFC_ACTIVE) {
+       if (vhost->logged_in) {
                evt = ibmvfc_get_event(vhost);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                tmf->common.length = sizeof(*tmf);
                tmf->scsi_id = rport->port_id;
                int_to_scsilun(sdev->lun, &tmf->lun);
-               tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+                       type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+               if (vhost->state == IBMVFC_ACTIVE)
+                       tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               else
+                       tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
                tmf->cancel_key = (unsigned long)sdev->hostdata;
                tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
        timeout = wait_for_completion_timeout(&evt->comp, timeout);
 
        if (!timeout) {
-               rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               rc = ibmvfc_cancel_all(sdev, 0);
                if (!rc) {
                        rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
                        if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ out:
  * @cmd:       scsi command to abort
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, abort_rc;
+       int cancel_rc, block_rc;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-       abort_rc = ibmvfc_abort_task_set(sdev);
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               ibmvfc_abort_task_set(sdev);
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
-       if (!cancel_rc && !abort_rc)
+       if (!cancel_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, reset_rc;
+       int cancel_rc, block_rc, reset_rc = 0;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
 
+/**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
 /**
  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev:      scsi device struct
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
        struct scsi_target *starget = scsi_target(sdev);
-       int reset_rc;
+       int block_rc;
+       int reset_rc = 0;
        int rc = FAILED;
        unsigned long cancel_rc = 0;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       if (block_rc != FAST_IO_FAIL) {
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       } else
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       int rc;
+       int rc, block_rc;
        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        dev_err(vhost->dev, "Resetting connection due to error recovery\n");
        rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+       if (block_rc == FAST_IO_FAIL)
+               return FAST_IO_FAIL;
+
        return rc ? FAILED : SUCCESS;
 }
 
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
                dev_rport = starget_to_rport(scsi_target(sdev));
                if (dev_rport != rport)
                        continue;
-               ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-               ibmvfc_abort_task_set(sdev);
+               ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
        }
 
        rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
index 3be8af624e6fce888ab2b148c66321a2ce30558c..017a5290e8c12ba5d27c89a390c4ed6490e05000 100644 (file)
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.10"
-#define IBMVFC_DRIVER_DATE             "(August 24, 2012)"
+#define IBMVFC_DRIVER_VERSION          "1.0.11"
+#define IBMVFC_DRIVER_DATE             "(April 12, 2013)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
        u16 error;
        u32 flags;
 #define IBMVFC_NATIVE_FC               0x01
-#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 reserved;
        u64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT       0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS       0x10
        u32 max_cmds;
        u32 scsi_id_sz;
        u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUN_RESET           0x10
 #define IBMVFC_TMF_TGT_RESET           0x20
 #define IBMVFC_TMF_LUA_VALID           0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS       0x80
        u32 cancel_key;
        u32 my_cancel_key;
        u32 pad;
index 2197b57fb2251f541e0bfc0eab626ce670b66504..82a3c1ec8706600473c4daedd33b248d2c94518f 100644 (file)
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-       if (!ioa_cfg->in_reset_reload) {
+       if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
 {
        u32 ioadl_flags = 0;
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
        int len = qc->nbytes;
        struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
        ioarcb->ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
        ioarcb->u.sis64_addr_data.data_ioadl_addr =
-               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
 
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int i;
 
        ENTER;
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].ioa_is_dead = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
        LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
-       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
        ioa_cfg->in_ioa_bringdown = 1;
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
index 21a6ff1ed5c6b1afb49c38d191372bc59533780d..a1fb840596ef08cb129c8301dd81a34d24c73fd5 100644 (file)
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs {        /* 22 bytes */
        u8 hob_lbam;
        u8 hob_lbah;
        u8 ctl;
-}__attribute__ ((packed, aligned(4)));
+}__attribute__ ((packed, aligned(2)));
 
 struct ipr_ioadl_desc {
        __be32 flags_and_data_len;
index c3aa6c5457b9c386e7237db6201dfb1f8603b83c..96a26f454673cf8b9cad6abd38ee9a28104c6a95 100644 (file)
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
        struct isci_host *ihost = idev->owning_port->owning_controller;
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
                sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
        } else if (dev_is_expander(dev)) {
                sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
        struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct isci_host *ihost = idev->owning_port->owning_controller;
 
                isci_remote_device_not_ready(ihost, idev,
index 7674caae1d88bac82520c65b0cd6004e19db626f..47a013fffae73c1d416b68fc6b81dc5274bf2c46 100644 (file)
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
 
 static inline bool dev_is_expander(struct domain_device *dev)
 {
-       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+       return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
 }
 
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
index 9594ab62702b1786d51d7ac80f7e10b365b6ce61..e3e3bcbd5a9fcf4245bf9d28b53c0274ca642c2d 100644 (file)
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
        /* all unaccelerated request types (non ssp or ncq) handled with
         * substates
         */
-       if (!task && dev->dev_type == SAS_END_DEV) {
+       if (!task && dev->dev_type == SAS_END_DEVICE) {
                state = SCI_REQ_TASK_WAIT_TC_COMP;
        } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
                state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
        if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
                return SCI_FAILURE_INVALID_REMOTE_DEVICE;
 
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                /* pass */;
        else if (dev_is_sata(dev))
                memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
        /* Build the common part of the request */
        sci_general_request_construct(ihost, idev, ireq);
 
-       if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
+       if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
                set_bit(IREQ_TMF, &ireq->flags);
                memset(ireq->tc, 0, sizeof(struct scu_task_context));
 
index b6f19a1db780f7303abc9796c19553b058c89ada..9bb020ac089cddf6b665e8a52f728c95621ece34 100644 (file)
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
        }
 
        /* XXX convert to get this from task->tproto like other drivers */
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                isci_tmf->proto = SAS_PROTOCOL_SSP;
                status = sci_task_request_construct_ssp(ireq);
                if (status != SCI_SUCCESS)
index bdb81cda84013f0d132aa1972bd409dcb9903f6b..161c98efade9b9f290c04588e4638df0f3c421ac 100644 (file)
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
        if (phy->attached_tproto & SAS_PROTOCOL_STP)
                dev->tproto = phy->attached_tproto;
        if (phy->attached_sata_dev)
-               dev->tproto |= SATA_DEV;
+               dev->tproto |= SAS_SATA_DEV;
 
-       if (phy->attached_dev_type == SATA_PENDING)
-               dev->dev_type = SATA_PENDING;
+       if (phy->attached_dev_type == SAS_SATA_PENDING)
+               dev->dev_type = SAS_SATA_PENDING;
        else {
                int res;
 
-               dev->dev_type = SATA_DEV;
+               dev->dev_type = SAS_SATA_DEV;
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
        int res;
 
        /* we weren't pending, so successfully end the reset sequence now */
-       if (dev->dev_type != SATA_PENDING)
+       if (dev->dev_type != SAS_SATA_PENDING)
                return 1;
 
        /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
                return 0;
 
        switch (ex_phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                return 0;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (ex_phy->attached_sata_dev)
                        return sas_ata_clear_pending(dev, ex_phy);
        default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
        struct dev_to_host_fis *fis =
                (struct dev_to_host_fis *) dev->frame_rcvd;
 
-       if (dev->dev_type == SATA_PENDING)
+       if (dev->dev_type == SAS_SATA_PENDING)
                return;
 
        if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
 {
        int res;
 
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                return -ENODEV;
 
        sas_get_ata_command_set(dev);
index a0c3003e0c7d2f6b5e1a89147ab29d58b1609aaa..62b58d38ce2e63beda6d1ba4d9876c9af2e51008 100644 (file)
 void sas_init_dev(struct domain_device *dev)
 {
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                INIT_LIST_HEAD(&dev->ex_dev.children);
                mutex_init(&dev->ex_dev.cmd_mutex);
                break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
                if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
                    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
                    && (fis->device & ~0x10) == 0)
-                       dev->dev_type = SATA_PM;
+                       dev->dev_type = SAS_SATA_PM;
                else
-                       dev->dev_type = SATA_DEV;
+                       dev->dev_type = SAS_SATA_DEV;
                dev->tproto = SAS_PROTOCOL_SATA;
        } else {
                struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
 
        dev->port = port;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                rc = sas_ata_init(dev);
                if (rc) {
                        rphy = NULL;
                        break;
                }
                /* fall through */
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                rphy = sas_end_device_alloc(port->port);
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
        dev->rphy = rphy;
        get_device(&dev->rphy->dev);
 
-       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
                list_add_tail(&dev->disco_list_node, &port->disco_list);
        else {
                spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
        dev->phy = NULL;
 
        /* remove the phys and ports, everything else should be gone */
-       if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+       if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                kfree(dev->ex_dev.ex_phy);
 
        if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
        spin_unlock_irq(&port->dev_list_lock);
 
        spin_lock_irq(&ha->lock);
-       if (dev->dev_type == SAS_END_DEV &&
+       if (dev->dev_type == SAS_END_DEVICE &&
            !list_empty(&dev->ssp_dev.eh_list_node)) {
                list_del_init(&dev->ssp_dev.eh_list_node);
                ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
                    task_pid_nr(current));
 
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                error = sas_discover_end_dev(dev);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                error = sas_discover_root_expander(dev);
                break;
-       case SATA_DEV:
-       case SATA_PM:
+       case SAS_SATA_DEV:
+       case SAS_SATA_PM:
 #ifdef CONFIG_SCSI_SAS_ATA
                error = sas_discover_sata(dev);
                break;
index f42b0e15410f8a52e0a570664b04547d459e7aec..446b85110a1fc0a69b07e42bb3ecc7144d76d1ce 100644 (file)
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
        }
 }
 
-static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
 {
        /* This is detecting a failure to transmit initial dev to host
         * FIS as described in section J.5 of sas-2 r16
         */
-       if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+       if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
            dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
-               return SATA_PENDING;
+               return SAS_SATA_PENDING;
        else
                return dr->attached_dev_type;
 }
 
 static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        enum sas_linkrate linkrate;
        u8 sas_addr[SAS_ADDR_SIZE];
        struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* Handle vacant phy - rest of dr data is not valid so skip it */
        if (phy->phy_state == PHY_VACANT) {
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-               phy->attached_dev_type = NO_DEVICE;
+               phy->attached_dev_type = SAS_PHY_UNUSED;
                if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
                        phy->phy_id = phy_id;
                        goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* help some expanders that fail to zero sas_address in the 'no
         * device' case
         */
-       if (phy->attached_dev_type == NO_DEVICE ||
+       if (phy->attached_dev_type == SAS_PHY_UNUSED ||
            phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
        else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 
  out:
        switch (phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                type = "stp pending";
                break;
-       case NO_DEVICE:
+       case SAS_PHY_UNUSED:
                type = "no device";
                break;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (phy->attached_iproto) {
                        if (phy->attached_tproto)
                                type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
                                type = "ssp";
                }
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                type = "smp";
                break;
        default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
        } else
 #endif
          if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
-               child->dev_type = SAS_END_DEV;
+               child->dev_type = SAS_END_DEVICE;
                rphy = sas_end_device_alloc(phy->port);
                /* FIXME: error handling */
                if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
 
 
        switch (phy->attached_dev_type) {
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
                sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
 
-       if (ex_phy->attached_dev_type == NO_DEVICE) {
+       if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
                if (ex_phy->routing_attr == DIRECT_ROUTING) {
                        memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
                        sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
                return 0;
 
-       if (ex_phy->attached_dev_type != SAS_END_DEV &&
-           ex_phy->attached_dev_type != FANOUT_DEV &&
-           ex_phy->attached_dev_type != EDGE_DEV &&
-           ex_phy->attached_dev_type != SATA_PENDING) {
+       if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+           ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_SATA_PENDING) {
                SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
                            "phy 0x%x\n", ex_phy->attached_dev_type,
                            SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        }
 
        switch (ex_phy->attached_dev_type) {
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                child = sas_ex_discover_end_dev(dev, phy_id);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
                        SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
                                    "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                        memcpy(dev->port->disc.fanout_sas_addr,
                               ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
                /* fallthrough */
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                child = sas_ex_discover_expander(dev, phy_id);
                break;
        default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == EDGE_DEV ||
-                    phy->attached_dev_type == FANOUT_DEV) &&
+               if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
        u8 sub_addr[8] = {0, };
 
        list_for_each_entry(child, &ex->children, siblings) {
-               if (child->dev_type != EDGE_DEV &&
-                   child->dev_type != FANOUT_DEV)
+               if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+                   child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                        continue;
                if (sub_addr[0] == 0) {
                        sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
        int i;
        u8  *sub_sas_addr = NULL;
 
-       if (dev->dev_type != EDGE_DEV)
+       if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
                return 0;
 
        for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == FANOUT_DEV ||
-                    phy->attached_dev_type == EDGE_DEV) &&
+               if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
                                                 struct ex_phy *child_phy)
 {
        static const char *ex_type[] = {
-               [EDGE_DEV] = "edge",
-               [FANOUT_DEV] = "fanout",
+               [SAS_EDGE_EXPANDER_DEVICE] = "edge",
+               [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
        };
        struct domain_device *parent = child->parent;
 
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
        if (!child->parent)
                return 0;
 
-       if (child->parent->dev_type != EDGE_DEV &&
-           child->parent->dev_type != FANOUT_DEV)
+       if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                return 0;
 
        parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
                child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
 
                switch (child->parent->dev_type) {
-               case EDGE_DEV:
-                       if (child->dev_type == FANOUT_DEV) {
+               case SAS_EDGE_EXPANDER_DEVICE:
+                       if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                                if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
                                    child_phy->routing_attr != TABLE_ROUTING) {
                                        sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
                                }
                        }
                        break;
-               case FANOUT_DEV:
+               case SAS_FANOUT_EXPANDER_DEVICE:
                        if (parent_phy->routing_attr != TABLE_ROUTING ||
                            child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
                                sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
        struct domain_device *dev;
 
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-               if (dev->dev_type == EDGE_DEV ||
-                   dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(dev->rphy);
 
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
 }
 
 static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
-                                   u8 *sas_addr, enum sas_dev_type *type)
+                                   u8 *sas_addr, enum sas_device_type *type)
 {
        int res;
        struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
                        SAS_DPRINTK("Expander phys DID NOT change\n");
        }
        list_for_each_entry(ch, &ex->children, siblings) {
-               if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
+               if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        res = sas_find_bcast_dev(ch, src_dev);
                        if (*src_dev)
                                return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
 
        list_for_each_entry_safe(child, n, &ex->children, siblings) {
                set_bit(SAS_DEV_GONE, &child->state);
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV)
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        sas_unregister_ex_tree(port, child);
                else
                        sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                        if (SAS_ADDR(child->sas_addr) ==
                            SAS_ADDR(phy->attached_sas_addr)) {
                                set_bit(SAS_DEV_GONE, &child->state);
-                               if (child->dev_type == EDGE_DEV ||
-                                   child->dev_type == FANOUT_DEV)
+                               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                        sas_unregister_ex_tree(parent->port, child);
                                else
                                        sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
        int res = 0;
 
        list_for_each_entry(child, &ex_root->children, siblings) {
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV) {
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        list_for_each_entry(child, &dev->ex_dev.children, siblings) {
                if (SAS_ADDR(child->sas_addr) ==
                    SAS_ADDR(ex_phy->attached_sas_addr)) {
-                       if (child->dev_type == EDGE_DEV ||
-                           child->dev_type == FANOUT_DEV)
+                       if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                           child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                res = sas_discover_bfs_by_root(child);
                        break;
                }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        return res;
 }
 
-static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
 {
        if (old == new)
                return true;
 
        /* treat device directed resets as flutter, if we went
-        * SAS_END_DEV to SATA_PENDING the link needs recovery
+        * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
         */
-       if ((old == SATA_PENDING && new == SAS_END_DEV) ||
-           (old == SAS_END_DEV && new == SATA_PENDING))
+       if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+           (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
                return true;
 
        return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct ex_phy *phy = &ex->ex_phy[phy_id];
-       enum sas_dev_type type = NO_DEVICE;
+       enum sas_device_type type = SAS_PHY_UNUSED;
        u8 sas_addr[8];
        int res;
 
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 
                sas_ex_phy_discover(dev, phy_id);
 
-               if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+               if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
                        action = ", needs recovery";
                SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
                            SAS_ADDR(dev->sas_addr), phy_id, action);
index 1de67964e5a1e39ffa10151c79489078826e2536..7e7ba83f0a2139c565f692fa93d0b3f5954abcce 100644 (file)
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
        rphy->identify.initiator_port_protocols = dev->iproto;
        rphy->identify.target_port_protocols = dev->tproto;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                /* FIXME: need sata device type */
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                rphy->identify.device_type = SAS_END_DEVICE;
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
                break;
        default:
index 1398b714c01836ee3789199cf3cd627ac4fa4021..d3c5297c6c89e242d845d16142f7a309ed4ef773 100644 (file)
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
                        continue;
                }
 
-               if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        dev->ex_dev.ex_change_count = -1;
                        for (i = 0; i < dev->ex_dev.num_phys; i++) {
                                struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
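The isci and libsas hunks above are a mechanical switch from the old libsas sas_dev_type enumerators to the sas_device_type names already used by the SAS transport class; the mapping, as read from the hunks themselves, is:

	enum sas_dev_type  ->  enum sas_device_type
	NO_DEVICE          ->  SAS_PHY_UNUSED
	SAS_END_DEV        ->  SAS_END_DEVICE
	EDGE_DEV           ->  SAS_EDGE_EXPANDER_DEVICE
	FANOUT_DEV         ->  SAS_FANOUT_EXPANDER_DEVICE
	SATA_DEV           ->  SAS_SATA_DEV
	SATA_PM            ->  SAS_SATA_PM
	SATA_PENDING       ->  SAS_SATA_PENDING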
index 7706c99ec8bbb1e8c70b051113f789cb6f97c1dd..bcc56cac4fd8f358d53084b598c167d060a82c19 100644 (file)
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128      /* sg element count per scsi
                cmnd for menlo needs nearly twice as for firmware
                downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ        0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512    /* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT   512     /* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT   4096    /* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT 4096  /* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN         100     /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL       (1 * HZ)   /* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL         (300 * HZ) /* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL       (msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL         (msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@ struct lpfc_hba {
        uint32_t lmt;
 
        uint32_t fc_topology;   /* link topology, from LINK INIT */
+       uint32_t fc_topology_changed;   /* link topology changed since LINK INIT */
 
        struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@ struct lpfc_hba {
        uint32_t cfg_poll_tmo;
        uint32_t cfg_use_msi;
        uint32_t cfg_fcp_imax;
+       uint32_t cfg_fcp_cpu_map;
        uint32_t cfg_fcp_wq_count;
        uint32_t cfg_fcp_eq_count;
        uint32_t cfg_fcp_io_channel;
+       uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
        uint64_t bg_reftag_err_cnt;
 
        /* fastpath list. */
-       spinlock_t scsi_buf_list_lock;
-       struct list_head lpfc_scsi_buf_list;
+       spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+       spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+       struct list_head lpfc_scsi_buf_list_get;
+       struct list_head lpfc_scsi_buf_list_put;
        uint32_t total_scsi_bufs;
        struct list_head lpfc_iocb_list;
        uint32_t total_iocbq_bufs;
index 9290713af253fd740454bbee6d2b6807f64f4b93..3c5625b8b1f4e83814f878c590f27377f4aaecbf 100644 (file)
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
        int i;
        int rc;
 
+       if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+               return 0;
+
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
                              LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
        int status = 0;
        int rc;
 
-       if (!phba->cfg_enable_hba_reset)
+       if ((!phba->cfg_enable_hba_reset) ||
+           (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return -EACCES;
 
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
                pci_disable_sriov(pdev);
                phba->cfg_sriov_nr_virtfn = 0;
        }
+
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
        if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                        "3054 lpfc_topology changed from %d to %d\n",
                        prev_val, val);
+               if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+                       phba->fc_topology_changed = 1;
                err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
                if (err) {
                        phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
                   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to IRQ vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_vector_map_info *cpup;
+       int  idx, len = 0;
+
+       if ((phba->sli_rev != LPFC_SLI_REV4) ||
+           (phba->intr_type != MSIX))
+               return len;
+
+       switch (phba->cfg_fcp_cpu_map) {
+       case 0:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: No mapping (%d)\n",
+                               phba->cfg_fcp_cpu_map);
+               return len;
+       case 1:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: HBA centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       case 2:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: Driver centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+               if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id);
+               else
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d IRQ %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id, cpup->irq);
+
+               cpup++;
+       }
+       return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: requested fcp_cpu_map value (setting it is not yet supported).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int status = -EINVAL;
+       return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#      0 - Do not affinitize IRQ vectors
+#      1 - Affinitize HBA vectors with respect to each HBA
+#          (start with CPU0 for each HBA)
+#      2 - Affinitize HBA vectors with respect to the entire driver
+#          (round robin thru all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+                "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: cpu map mode value.
+ *
+ * Description:
+ * If val is in the valid range [0-2], affinitize the adapter's
+ * MSIX vectors to that mapping mode.
+ *
+ * Returns:
+ * zero; an out-of-range val is replaced by the default mapping
+ * and logged.
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               phba->cfg_fcp_cpu_map = 0;
+               return 0;
+       }
+
+       if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+               phba->cfg_fcp_cpu_map = val;
+               return 0;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3326 fcp_cpu_map: %d out of range, using default\n",
+                       val);
+       phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+       return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+                  lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
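
To illustrate the difference between the two affinity modes described above: mode 1 restarts the CPU numbering for every HBA, while mode 2 (the default) keeps one driver-wide counter rolling across all HBAs. A small illustrative C sketch; the CPU/vector counts and the map_vectors helper are hypothetical, not the driver's actual mapping code:

#include <stdio.h>

#define NUM_CPUS	8

/* Print which CPU each of an HBA's MSI-X vectors would land on. */
static void map_vectors(int hba, int nvec, int hba_centric, int *next_cpu)
{
	int cpu = hba_centric ? 0 : *next_cpu;

	for (int v = 0; v < nvec; v++) {
		printf("HBA%d vector %d -> CPU %d\n", hba, v, cpu);
		cpu = (cpu + 1) % NUM_CPUS;
	}
	if (!hba_centric)
		*next_cpu = cpu;	/* driver centric: keep rolling */
}

int main(void)
{
	int next_cpu = 0;

	puts("mode 1 (HBA centric):");
	map_vectors(0, 4, 1, &next_cpu);
	map_vectors(1, 4, 1, &next_cpu);

	puts("mode 2 (driver centric):");
	next_cpu = 0;
	map_vectors(0, 4, 0, &next_cpu);
	map_vectors(1, 4, 0, &next_cpu);
	return 0;
}
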
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 #       0  = disabled (default)
 #       1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #      - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
            LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-               LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-               "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+           LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+           "Max Protection Scatter Gather Segment Count");
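
As a worked example of the (MAX_SEG_COUNT * SEG_SIZE) limit mentioned in the sg_seg_cnt comment above; the 4 KiB per-segment size is an assumption for illustration, not something the attribute defines:

#include <stdio.h>

int main(void)
{
	unsigned int seg_size = 4096;		/* assumed per-segment size */
	unsigned int seg_cnts[] = { 64, 128, 512, 4096 };

	for (int i = 0; i < 4; i++)
		printf("sg_seg_cnt=%4u -> max I/O %u KiB\n",
		       seg_cnts[i], seg_cnts[i] * seg_size / 1024);
	return 0;
}
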
 
 struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_poll_tmo,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
+       &dev_attr_lpfc_fcp_cpu_map,
        &dev_attr_lpfc_fcp_wq_count,
        &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+       lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
        lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
index 8886668920049e685cb9b5d3572cb1426dbea526..094be2cad65bba821e9050a48bb2602dfd65f067 100644 (file)
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
        unsigned int transfer_bytes, bytes_copied = 0;
        unsigned int sg_offset, dma_offset;
        unsigned char *dma_address, *sg_address;
-       struct scatterlist *sgel;
        LIST_HEAD(temp_list);
-
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+       bool sg_valid;
 
        list_splice_init(&dma_buffers->list, &temp_list);
        list_add(&dma_buffers->list, &temp_list);
        sg_offset = 0;
-       sgel = bsg_buffers->sg_list;
+       if (to_buffers)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+       sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+                      sg_flags);
+       local_irq_save(flags);
+       sg_valid = sg_miter_next(&miter);
        list_for_each_entry(mp, &temp_list, list) {
                dma_offset = 0;
-               while (bytes_to_transfer && sgel &&
+               while (bytes_to_transfer && sg_valid &&
                       (dma_offset < LPFC_BPL_SIZE)) {
                        dma_address = mp->virt + dma_offset;
                        if (sg_offset) {
                                /* Continue previous partial transfer of sg */
-                               sg_address = sg_virt(sgel) + sg_offset;
-                               transfer_bytes = sgel->length - sg_offset;
+                               sg_address = miter.addr + sg_offset;
+                               transfer_bytes = miter.length - sg_offset;
                        } else {
-                               sg_address = sg_virt(sgel);
-                               transfer_bytes = sgel->length;
+                               sg_address = miter.addr;
+                               transfer_bytes = miter.length;
                        }
                        if (bytes_to_transfer < transfer_bytes)
                                transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
                        sg_offset += transfer_bytes;
                        bytes_to_transfer -= transfer_bytes;
                        bytes_copied += transfer_bytes;
-                       if (sg_offset >= sgel->length) {
+                       if (sg_offset >= miter.length) {
                                sg_offset = 0;
-                               sgel = sg_next(sgel);
+                               sg_valid = sg_miter_next(&miter);
                        }
                }
        }
+       sg_miter_stop(&miter);
+       local_irq_restore(flags);
        list_del_init(&dma_buffers->list);
        list_splice(&temp_list, &dma_buffers->list);
        return bytes_copied;
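
The hunk above replaces direct sg_virt()/sg_next() walking with the sg_mapping_iter API, which maps each scatterlist chunk (atomically here, hence the local_irq_save) and hands back miter.addr/miter.length one piece at a time. A hedged, stripped-down kernel-style sketch of the same iteration pattern, copying an sg list into a flat buffer (not the driver's actual helper):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to buf_len bytes out of an sg list into a linear buffer
 * using the sg_mapping_iter pattern shown in lpfc_bsg_copy_data(). */
static size_t sg_copy_out(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buf_len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < buf_len && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buf_len - copied);

		memcpy((char *)buf + copied, miter.addr, len);
		copied += len;
	}
	sg_miter_stop(&miter);
	return copied;
}
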
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = cmp;
        cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.ndlp = ndlp;
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        ctiocb->context1 = dd_data;
        ctiocb->context2 = cmp;
        ctiocb->context3 = bmp;
+       ctiocb->context_un.ndlp = ndlp;
        ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
        dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
        evt->wait_time_stamp = jiffies;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        if (list_empty(&evt->events_to_see))
                ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
        else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        evt->waiting = 1;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        evt->waiting = 0;
        if (list_empty(&evt->events_to_see)) {
                rc = (time_left) ? -EINTR : -ETIMEDOUT;
index 7631893ae00511c49033f3445e45f8b164d47d46..d41456e5f8149e3fc09d82086554d20ef9921ce2 100644 (file)
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
index 7bff3a19af56880dba0fdca4e203676d52c4e916..ae1a07c57caef681d5d54222e39b41b67e70643f 100644 (file)
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
                if (init_utsname()->nodename[0] != '\0')
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
                else
-                       mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+                       mod_timer(&vport->fc_fdmitmo, jiffies +
+                                 msecs_to_jiffies(1000 * 60));
        }
        return;
 }
index bbed8471bf0b81fce50390bb9b0ec6b7a68ba146..3cae0a92e8bd0001142c7a175102de1e0ab01dc6 100644 (file)
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 
                icmd->un.elsreq64.remoteID = did;               /* DID */
                icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-               icmd->ulpTimeout = phba->fc_ratov * 2;
+               if (elscmd == ELS_CMD_FLOGI)
+                       icmd->ulpTimeout = FF_DEF_RATOV * 2;
+               else
+                       icmd->ulpTimeout = phba->fc_ratov * 2;
        } else {
                icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
                icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                /* Xmit ELS command <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0116 Xmit ELS command x%x to remote "
-                                "NPORT x%x I/O tag: x%x, port state: x%x\n",
+                                "NPORT x%x I/O tag: x%x, port state:x%x"
+                                " fc_flag:x%x\n",
                                 elscmd, did, elsiocb->iotag,
-                                vport->port_state);
+                                vport->port_state,
+                                vport->fc_flag);
        } else {
                /* Xmit ELS response <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0117 Xmit ELS response x%x to remote "
-                                "NPORT x%x I/O tag: x%x, size: x%x\n",
+                                "NPORT x%x I/O tag: x%x, size: x%x "
+                                "port_state x%x fc_flag x%x\n",
                                 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-                                cmdSize);
+                                cmdSize, vport->port_state,
+                                vport->fc_flag);
        }
        return elsiocb;
 
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_PT2PT;
        spin_unlock_irq(shost->host_lock);
+       /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+       if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+               lpfc_unregister_fcf_prep(phba);
+
+       /* The FC_VFI_REGISTERED flag will get cleared in the cmpl
+                * handler for unreg_vfi, but if we don't force the
+                * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+                * built with the update bit set instead of just the vp bit to
+                * change the Nport ID.  We need to have the vp set and the
+                * Upd cleared on topology changes.
+                */
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~FC_VFI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+               phba->fc_topology_changed = 0;
+               lpfc_issue_reg_vfi(vport);
+       }
 
        /* Start discovery - this should just do CLEAR_LA */
        lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
                if ((phba->sli_rev == LPFC_SLI_REV4) &&
                    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-                    (vport->fc_prevDID != vport->fc_myDID))) {
-                       if (vport->fc_flag & FC_VFI_REGISTERED)
-                               lpfc_sli4_unreg_all_rpis(vport);
+                    (vport->fc_prevDID != vport->fc_myDID) ||
+                       phba->fc_topology_changed)) {
+                       if (vport->fc_flag & FC_VFI_REGISTERED) {
+                               if (phba->fc_topology_changed) {
+                                       lpfc_unregister_fcf_prep(phba);
+                                       spin_lock_irq(shost->host_lock);
+                                       vport->fc_flag &= ~FC_VFI_REGISTERED;
+                                       spin_unlock_irq(shost->host_lock);
+                                       phba->fc_topology_changed = 0;
+                               } else {
+                                       lpfc_sli4_unreg_all_rpis(vport);
+                               }
+                       }
                        lpfc_issue_reg_vfi(vport);
                        lpfc_nlp_put(ndlp);
                        goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
 
        /* FLOGI completes successfully */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0101 FLOGI completes successfully "
-                        "Data: x%x x%x x%x x%x\n",
+                        "0101 FLOGI completes successfully, I/O tag:x%x, "
+                        "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
                         irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+                        vport->port_state, vport->fc_flag);
 
        if (vport->port_state == LPFC_FLOGI) {
                /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        struct ls_rjt stat;
        uint32_t cmd, did;
        int rc;
+       uint32_t fc_flag = 0;
+       uint32_t port_state = 0;
 
        cmd = *lp++;
        sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                         * will be.
                         */
                        vport->fc_myDID = PT2PT_LocalID;
-               }
+               } else
+                       vport->fc_myDID = PT2PT_RemoteID;
 
                /*
                 * The vport state should go to LPFC_FLOGI only
                 * AFTER we issue a FLOGI, not receive one.
                 */
                spin_lock_irq(shost->host_lock);
+               fc_flag = vport->fc_flag;
+               port_state = vport->port_state;
                vport->fc_flag |= FC_PT2PT;
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               vport->port_state = LPFC_FLOGI;
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                "3311 Rcv Flogi PS x%x new PS x%x "
+                                "fc_flag x%x new fc_flag x%x\n",
+                                port_state, vport->port_state,
+                                fc_flag, vport->fc_flag);
 
                /*
                 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
        }
 
        if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-               mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+               mod_timer(&vport->els_tmofunc,
+                         jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        /* ELS command <elsCmd> received from NPORT <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0112 ELS command x%x received from NPORT x%x "
-                        "Data: x%x\n", cmd, did, vport->port_state);
+                        "Data: x%x x%x x%x x%x\n",
+                       cmd, did, vport->port_state, vport->fc_flag,
+                       vport->fc_myDID, vport->fc_prevDID);
        switch (cmd) {
        case ELS_CMD_PLOGI:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                phba->fc_stat.elsRcvPLOGI++;
                ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   (phba->pport->fc_flag & FC_PT2PT)) {
+                       vport->fc_prevDID = vport->fc_myDID;
+                       /* Our DID needs to be updated before registering
+                        * the vfi. This is done in lpfc_rcv_plogi but
+                        * that is called after the reg_vfi.
+                        */
+                       vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                        "3312 Remote port assigned DID x%x "
+                                        "%x\n", vport->fc_myDID,
+                                        vport->fc_prevDID);
+               }
 
                lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
+               shost = lpfc_shost_from_vport(vport);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        if (!(phba->pport->fc_flag & FC_PT2PT) ||
                                (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         * another NPort and the other side has initiated
                         * the PLOGI before responding to our FLOGI.
                         */
+                       if (phba->sli_rev == LPFC_SLI_REV4 &&
+                           (phba->fc_topology_changed ||
+                            vport->fc_myDID != vport->fc_prevDID)) {
+                               lpfc_unregister_fcf_prep(phba);
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_VFI_REGISTERED;
+                               spin_unlock_irq(shost->host_lock);
+                               phba->fc_topology_changed = 0;
+                               lpfc_issue_reg_vfi(vport);
+                       }
                }
 
-               shost = lpfc_shost_from_vport(vport);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
                spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        spin_lock_irq(shost->host_lock);
        if (vport->fc_flag & FC_DISC_DELAYED) {
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "3334 Delay fc port discovery for %d seconds\n",
+                               phba->fc_ratov);
                mod_timer(&vport->delayed_disc_tmo,
-                       jiffies + HZ * phba->fc_ratov);
+                       jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
                return;
        }
        spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
                return;
 
        shost = lpfc_shost_from_vport(phba->pport);
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
        blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        /* Start a timer to unblock fabric iocbs after 100ms */
        if (!blocked)
-               mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+               mod_timer(&phba->fabric_block_timer,
+                         jiffies + msecs_to_jiffies(100));
 
        return;
 }
index 326e05a65a7314d7648a16f0097485bb5d439a0b..0f6e2548f35d7b7b03d870b813b5c1726bfcc48d 100644 (file)
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!list_empty(&evtp->evt_listp))
                return;
 
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
-       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-           (phba->sli_rev < LPFC_SLI_REV4))
-               lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
 }
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_TS_INPROG;
-               if (phba->pport->port_state != LPFC_FLOGI) {
+               if (phba->pport->port_state != LPFC_FLOGI &&
+                   phba->pport->fc_flag & FC_FABRIC) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                spin_unlock_irq(&phba->hbalock);
                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                                "2836 New FCF matches in-use "
-                                               "FCF (x%x)\n",
-                                               phba->fcf.current_rec.fcf_indx);
+                                               "FCF (x%x), port_state:x%x, "
+                                               "fc_flag:x%x\n",
+                                               phba->fcf.current_rec.fcf_indx,
+                                               phba->pport->port_state,
+                                               phba->pport->fc_flag);
                                goto out;
                        } else
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
        LPFC_MBOXQ_t *mboxq;
-       int rc;
+       int rc, vpi;
+
+       if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+               vpi = lpfc_alloc_vpi(vport->phba);
+               if (!vpi) {
+                       lpfc_printf_vlog(vport, KERN_ERR,
+                                        LOG_MBOX,
+                                        "3303 Failed to obtain vport vpi\n");
+                       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+                       return;
+               }
+               vport->vpi = vpi;
+       }
 
        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
-       /* If the VFI is already registered, there is nothing else to do */
+       /* If the VFI is already registered, there is nothing else to do,
+        * unless this was a VFI update and we are in PT2PT mode; then
+        * we should drop through to set the port state to ready.
+        */
        if (vport->fc_flag & FC_VFI_REGISTERED)
-               goto out_free_mem;
+               if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+                     vport->fc_flag & FC_PT2PT))
+                       goto out_free_mem;
 
        /* The VPI is implicitly registered when the VFI is registered */
        spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                        "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+                        "alpacnt:%d LinkState:%x topology:%x\n",
+                        vport->port_state, vport->fc_flag, vport->fc_myDID,
+                        vport->phba->alpa_map[0],
+                        phba->link_state, phba->fc_topology);
+
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /*
                 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        /* Use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
-                       lpfc_disc_start(vport);
+                       if (vport->fc_flag & FC_PT2PT)
+                               vport->port_state = LPFC_VPORT_READY;
+                       else
+                               lpfc_disc_start(vport);
                } else {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                break;
        }
 
+       if (phba->fc_topology &&
+           phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "3314 Topology changed was 0x%x is 0x%x\n",
+                               phba->fc_topology,
+                               bf_get(lpfc_mbx_read_top_topology, la));
+               phba->fc_topology_changed = 1;
+       }
+
        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
                        tmo, vport->port_state, vport->fc_flag);
        }
 
-       mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+       mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
        uint32_t clear_la_pending;
        int did_changed;
 
-       if (!lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "3315 Link is not up %x\n",
+                                phba->link_state);
                return;
+       }
 
        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
        if (num_sent)
                return;
 
-       /* Register the VPI for SLI3, NON-NPIV only. */
+       /* Register the VPI for SLI3, NPIV only. */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_issue_clear_la(phba, vport);
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (vport->cfg_fdmi_on == 1)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
        else
-               mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+               mod_timer(&vport->fc_fdmitmo,
+                         jiffies + msecs_to_jiffies(1000 * 60));
 
        /* decrement the node reference count held for this callback
         * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
-       int i, rc;
+       int i = 0, rc;
 
        /* Unregister RPIs */
        if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
+       if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+               ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+               if (ndlp)
+                       lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+               lpfc_cleanup_pending_mbox(phba->pport);
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(phba->pport);
+               lpfc_mbx_unreg_vpi(phba->pport);
+               shost = lpfc_shost_from_vport(phba->pport);
+               spin_lock_irq(shost->host_lock);
+               phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+               phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
index e8c47603170370ca0d2418bb3e4d759b377d60e3..83700c18f4688a872894aec6da35d7c46058580a 100644 (file)
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
 #define        BG_OP_IN_CSUM_OUT_CSUM          0x5
 #define        BG_OP_IN_CRC_OUT_CSUM           0x6
 #define        BG_OP_IN_CSUM_OUT_CRC           0x7
+#define        BG_OP_RAW_MODE                  0x8
 
 struct lpfc_pde5 {
        uint32_t word0;
index 1dd2f6f0a1272f4717e6063449879b55b06ce4b0..713a4613ec3ac1c22829fa9d150d6c9767ae3d80 100644 (file)
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          50000
 
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES       8
 
@@ -621,7 +626,7 @@ struct lpfc_register {
 #define lpfc_sliport_status_rdy_SHIFT  23
 #define lpfc_sliport_status_rdy_MASK   0x1
 #define lpfc_sliport_status_rdy_WORD   word0
-#define MAX_IF_TYPE_2_RESETS   1000
+#define MAX_IF_TYPE_2_RESETS           6
 
 #define LPFC_CTL_PORT_CTL_OFFSET       0x408
 #define lpfc_sliport_ctrl_end_SHIFT    30
index 90b8b0515e23d7c0ee0f309d640c5fb6e94e8988..cb465b253910b9972eb48639d4527162a7a0b0b9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
-       mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+       mod_timer(&vport->els_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
-       mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+       list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        return 0;
 }
 
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
-                       jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
 }
 
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->pport->work_port_lock);
 
-       if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-               jiffies)) {
+       if (time_after(phba->last_completion_time +
+                       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
-                                                HZ * LPFC_HB_MBOX_INTERVAL);
+                                                msecs_to_jiffies(1000 *
+                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
 
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
-                                               HZ * LPFC_HB_MBOX_INTERVAL);
+                                               msecs_to_jiffies(1000 *
+                                               LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                phba->skipped_hb = jiffies;
 
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                                jiffies +
+                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
 }
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;
+       bool vpis_cleared = false;
 
        if (!phba)
                return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
+               spin_lock_irq(&phba->hbalock);
+               if (!phba->sli4_hba.max_cfg_param.vpi_used)
+                       vpis_cleared = true;
+               spin_unlock_irq(&phba->hbalock);
        } else {
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+                               if ((vpis_cleared) &&
+                                   (vports[i]->port_type !=
+                                       LPFC_PHYSICAL_PORT))
+                                       vports[i]->vpi = 0;
+                       }
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
        struct lpfc_iocbq *io, *io_next;
 
        spin_lock_irq(&phba->hbalock);
+
        /* Release all the lpfc_scsi_bufs maintained by this host. */
-       spin_lock(&phba->scsi_buf_list_lock);
-       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+       spin_lock(&phba->scsi_buf_list_put_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+                                list) {
                list_del(&sb->list);
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
-       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
+
+       spin_lock(&phba->scsi_buf_list_get_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+                                list) {
+               list_del(&sb->list);
+               pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+                             sb->dma_handle);
+               kfree(sb);
+               phba->total_scsi_bufs--;
+       }
+       spin_unlock(&phba->scsi_buf_list_get_lock);
 
        /* Release all the lpfc_iocbq entries maintained by this host. */
        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        phba->sli4_hba.scsi_xri_cnt,
                        phba->sli4_hba.scsi_xri_max);
 
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
                /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                                      psb->dma_handle);
                        kfree(psb);
                }
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
 
        /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        return 0;
 
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                stat = 1;
                goto finished;
        }
-       if (time >= 30 * HZ) {
+       if (time >= msecs_to_jiffies(30 * 1000)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0461 Scanning longer than 30 "
                                "seconds.  Continuing initialization\n");
                stat = 1;
                goto finished;
        }
-       if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+       if (time >= msecs_to_jiffies(15 * 1000) &&
+           phba->link_state <= LPFC_LINK_DOWN) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0465 Link down longer than 15 "
                                "seconds.  Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                goto finished;
        if (vport->num_disc_nodes || vport->fc_prli_sent)
                goto finished;
-       if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+       if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
                goto finished;
        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
                goto finished;
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                return -ENOMEM;
 
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
         */
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-               sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
+       /* Initialize the host templates with the configured values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-               phba->cfg_sg_dma_buf_size +=
-                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a BDE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (ie: there could be one for every block
+                * in the IO), we just allocate enough BDEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+               /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a BDE for each, and a BDE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+               /* Total BDEs in BPL for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
        }
 
-       /* Also reinitialize the host templates with new values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        phba->max_vpi = LPFC_MAX_VPI;
        /* This will be set to correct value after config_port mbox */
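
As a rough worked example of the two sizing branches added above (BlockGuard on vs. off); the struct sizes below are assumptions for illustration only, the real sizes come from the lpfc headers:

#include <stdio.h>

int main(void)
{
	/* Assumed sizes, for illustration only */
	unsigned int fcp_cmnd = 32, fcp_rsp = 40, bde64 = 12;
	unsigned int max_sg_seg_cnt = 4096;	/* LPFC_MAX_SG_SEG_CNT, assumed */
	unsigned int sg_seg_cnt = 64;		/* default lpfc_sg_seg_cnt */

	/* BlockGuard enabled: reserve BDEs for the worst case */
	printf("BG on : dma_buf = %u bytes\n",
	       fcp_cmnd + fcp_rsp + max_sg_seg_cnt * bde64);

	/* BlockGuard disabled: cmnd + rsp + (sg_seg_cnt + 2) BDEs */
	printf("BG off: dma_buf = %u bytes\n",
	       fcp_cmnd + fcp_rsp + (sg_seg_cnt + 2) * bde64);
	return 0;
}
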
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+       struct lpfc_vector_map_info *cpup;
        struct lpfc_sli *psli;
        LPFC_MBOXQ_t *mboxq;
-       int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+       int rc, i, hbq_count, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
-       int longs, sli_family;
-       int sges_per_segment;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-       /* With BlockGuard we can have multiple SGEs per Data Segemnt */
-       sges_per_segment = 1;
-       if (phba->cfg_enable_bg)
-               sges_per_segment = 2;
-
        /*
         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
         * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
        if (!phba->sli.ring)
                return -ENOMEM;
+
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * It doesn't matter what family our adapter is in; we are
+        * limited to 2 pages (512 SGEs) for our SGL.
+        * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+        */
+       max_buf_size = (2 * SLI4_PAGE_SIZE);
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+       /*
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
-        * To insure that the scsi sgl does not cross a 4k page boundary only
-        * sgl sizes of must be a power of 2.
         */
-       buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-                   (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
-                   sizeof(struct sli4_sge)));
-
-       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-       max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-       switch (sli_family) {
-       case LPFC_SLI_INTF_FAMILY_BE2:
-       case LPFC_SLI_INTF_FAMILY_BE3:
-               /* There is a single hint for BE - 2 pages per BPL. */
-               if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-                   LPFC_SLI_INTF_SLI_HINT1_1)
-                       max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-               break;
-       case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-       case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-       default:
-               break;
+
+       if (phba->cfg_enable_bg) {
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a SGE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (ie: there could be one for every block
+                * in the IO), we just allocate enough SGEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) + max_buf_size;
+
+               /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a SGE for each, and a SGE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+               /* Total SGEs for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+               /*
+                * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+                * to post 1 page for the SGL.
+                */
        }
 
-       for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
-            dma_buf_size < max_buf_size && buf_size > dma_buf_size;
-            dma_buf_size = dma_buf_size << 1)
-               ;
-       if (dma_buf_size == max_buf_size)
-               phba->cfg_sg_seg_cnt = (dma_buf_size -
-                       sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
-                       (2 * sizeof(struct sli4_sge))) /
-                               sizeof(struct sli4_sge);
-       phba->cfg_sg_dma_buf_size = dma_buf_size;
+       /* Initialize the host templates with the updated values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+               phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+       else
+               phba->cfg_sg_dma_buf_size =
+                       SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
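
Taken out of context, the buffer sizing above reduces to a small two-branch calculation: BlockGuard I/O reserves the full two-page SGL area, while regular I/O sizes the buffer from the configured segment count. The following standalone C sketch models just that calculation; the struct sizes and segment limits are placeholder values, not the driver's real constants, and the driver's DIF path additionally caps the configured segment count.

#include <stdio.h>

/* Placeholder sizes only -- the real values come from the lpfc headers. */
#define SLI4_PAGE_SIZE   4096
#define SGE_SIZE         16    /* stand-in for sizeof(struct sli4_sge) */
#define FCP_CMND_SIZE    32    /* stand-in for sizeof(struct fcp_cmnd) */
#define FCP_RSP_SIZE     96    /* stand-in for sizeof(struct fcp_rsp) */
#define MAX_SGL_SEG_CNT  512   /* 2 pages worth of SGEs */

static void size_dma_buf(int sg_seg_cnt, int enable_bg,
                         int *dma_buf_size, int *total_seg_cnt)
{
        int max_buf_size = 2 * SLI4_PAGE_SIZE;

        if (sg_seg_cnt > MAX_SGL_SEG_CNT - 2)
                sg_seg_cnt = MAX_SGL_SEG_CNT - 2;

        if (enable_bg) {
                /* BlockGuard: reserve the whole 2-page SGL area */
                *dma_buf_size = FCP_CMND_SIZE + FCP_RSP_SIZE + max_buf_size;
                *total_seg_cnt = MAX_SGL_SEG_CNT;
        } else {
                /* Regular I/O: cmnd + rsp + one SGE per segment, plus 2 */
                *dma_buf_size = FCP_CMND_SIZE + FCP_RSP_SIZE +
                                (sg_seg_cnt + 2) * SGE_SIZE;
                *total_seg_cnt = sg_seg_cnt + 2;
        }
}

int main(void)
{
        int buf, segs;

        size_dma_buf(64, 0, &buf, &segs);
        printf("non-BG: dma_buf=%d total_segs=%d\n", buf, segs);
        size_dma_buf(64, 1, &buf, &segs);
        printf("BG:     dma_buf=%d total_segs=%d\n", buf, segs);
        return 0;
}
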
 
        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcp_eq_hdl;
        }
 
+       phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+                                        phba->sli4_hba.num_present_cpu),
+                                        GFP_KERNEL);
+       if (!phba->sli4_hba.cpu_map) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3327 Failed allocate memory for msi-x "
+                               "interrupt vector mapping\n");
+               rc = -ENOMEM;
+               goto out_free_msix;
+       }
+       /* Initialize io channels for round robin */
+       cpup = phba->sli4_hba.cpu_map;
+       rc = 0;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               cpup->channel_id = rc;
+               rc++;
+               if (rc >= phba->cfg_fcp_io_channel)
+                       rc = 0;
+       }
+
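
The loop above simply deals IO channels out to the present CPUs in round-robin order. A minimal standalone sketch of that mapping, with assumed CPU and channel counts:

#include <stdio.h>

int main(void)
{
        int num_present_cpu = 8;   /* assumed */
        int fcp_io_channel = 3;    /* assumed */
        int chan = 0;

        /* Same round-robin walk as the initialization loop above */
        for (int cpu = 0; cpu < num_present_cpu; cpu++) {
                printf("cpu %d -> channel %d\n", cpu, chan);
                if (++chan >= fcp_io_channel)
                        chan = 0;
        }
        return 0;
}
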
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_msix:
+       kfree(phba->sli4_hba.msix_entries);
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 {
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+       /* Free memory allocated for msi-x interrupt vector to CPU mapping */
+       kfree(phba->sli4_hba.cpu_map);
+       phba->sli4_hba.num_present_cpu = 0;
+       phba->sli4_hba.num_online_cpu = 0;
+
        /* Free memory allocated for msi-x interrupt vector entries */
        kfree(phba->sli4_hba.msix_entries);
 
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        init_waitqueue_head(&phba->work_waitq);
 
        /* Initialize the scsi buffer list used by driver for scsi IO */
-       spin_lock_init(&phba->scsi_buf_list_lock);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+       spin_lock_init(&phba->scsi_buf_list_get_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+       spin_lock_init(&phba->scsi_buf_list_put_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        int cfg_fcp_io_channel;
        uint32_t cpu;
        uint32_t i = 0;
+       uint32_t j = 0;
 
 
        /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        /* Sanity check on HBA EQ parameters */
        cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
 
-       /* It doesn't make sense to have more io channels then CPUs */
-       for_each_online_cpu(cpu) {
-               i++;
+       /* It doesn't make sense to have more io channels than online CPUs */
+       for_each_present_cpu(cpu) {
+               if (cpu_online(cpu))
+                       i++;
+               j++;
        }
+       phba->sli4_hba.num_online_cpu = i;
+       phba->sli4_hba.num_present_cpu = j;
+
        if (i < cfg_fcp_io_channel) {
                lpfc_printf_log(phba,
                                KERN_ERR, LOG_INIT,
                                "3188 Reducing IO channels to match number of "
-                               "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+                               "online CPUs: from %d to %d\n",
+                               cfg_fcp_io_channel, i);
                cfg_fcp_io_channel = i;
        }
 
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 
 out:
        /* Catch the not-ready port failure after a port reset. */
-       if (num_resets >= MAX_IF_TYPE_2_RESETS)
+       if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3317 HBA not functional: IP Reset Failed "
+                               "after (%d) retries, try: "
+                               "echo fw_reset > board_mode\n", num_resets);
                rc = -ENODEV;
+       }
 
        return rc;
 }
@@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+
+       /*
+        * If we get here, we have used ALL CPUs for the specific
+        * phys_id. Now we need to clear out lpfc_used_cpu and start
+        * reusing CPUs.
+        */
+
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               if (lpfc_used_cpu[cpu] == phys_id)
+                       lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+       return LPFC_VECTOR_MAP_EMPTY;
+}
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba:      pointer to lpfc hba data structure.
+ * @vectors:   number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
+ * affinitization across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+       int i, idx, saved_chann, used_chann, cpu, phys_id;
+       int max_phys_id, num_io_channel, first_cpu;
+       struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+       struct cpuinfo_x86 *cpuinfo;
+#endif
+       struct cpumask *mask;
+       uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+       /* If there is no mapping, just return */
+       if (!phba->cfg_fcp_cpu_map)
+               return 1;
+
+       /* Init cpu_map array */
+       memset(phba->sli4_hba.cpu_map, 0xff,
+              (sizeof(struct lpfc_vector_map_info) *
+               phba->sli4_hba.num_present_cpu));
+
+       max_phys_id = 0;
+       phys_id = 0;
+       num_io_channel = 0;
+       first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+       /* Update CPU map with physical id and core id of each CPU */
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+               cpuinfo = &cpu_data(cpu);
+               cpup->phys_id = cpuinfo->phys_proc_id;
+               cpup->core_id = cpuinfo->cpu_core_id;
+#else
+               /* No distinction between CPUs for other platforms */
+               cpup->phys_id = 0;
+               cpup->core_id = 0;
+#endif
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3328 CPU physid %d coreid %d\n",
+                               cpup->phys_id, cpup->core_id);
+
+               if (cpup->phys_id > max_phys_id)
+                       max_phys_id = cpup->phys_id;
+               cpup++;
+       }
+
+       /* Now associate the HBA vectors with specific CPUs */
+       for (idx = 0; idx < vectors; idx++) {
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = lpfc_find_next_cpu(phba, phys_id);
+               if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+                       /* Try for all phys_id's */
+                       for (i = 1; i < max_phys_id; i++) {
+                               phys_id++;
+                               if (phys_id > max_phys_id)
+                                       phys_id = 0;
+                               cpu = lpfc_find_next_cpu(phba, phys_id);
+                               if (cpu == LPFC_VECTOR_MAP_EMPTY)
+                                       continue;
+                               goto found;
+                       }
+
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3329 Cannot set affinity: "
+                                       "Error mapping vector %d (%d)\n",
+                                       idx, vectors);
+                       return 0;
+               }
+found:
+               cpup += cpu;
+               if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+                       lpfc_used_cpu[cpu] = phys_id;
+
+               /* Associate vector with selected CPU */
+               cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+               /* Associate IO channel with selected CPU */
+               cpup->channel_id = idx;
+               num_io_channel++;
+
+               if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+                       first_cpu = cpu;
+
+               /* Now affinitize to the selected CPU */
+               mask = &cpup->maskbits;
+               cpumask_clear(mask);
+               cpumask_set_cpu(cpu, mask);
+               i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+                                         vector, mask);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3330 Set Affinity: CPU %d channel %d "
+                               "irq %d (%x)\n",
+                               cpu, cpup->channel_id,
+                               phba->sli4_hba.msix_entries[idx].vector, i);
+
+               /* Spread vector mapping across multiple physical CPU nodes */
+               phys_id++;
+               if (phys_id > max_phys_id)
+                       phys_id = 0;
+       }
+
+       /*
+        * Finally fill in the IO channel for any remaining CPUs.
+        * At this point, all IO channels have been assigned to a specific
+        * MSIx vector, mapped to a specific CPU.
+        * Base the remaining IO channel assignments on the IO channels
+        * already assigned to other CPUs on the same phys_id.
+        */
+       for (i = 0; i <= max_phys_id; i++) {
+               /*
+                * If there are no io channels already mapped to
+                * this phys_id, just round robin through the io_channels.
+                * Set up chann[] for round robin.
+                */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+                       chann[idx] = idx;
+
+               saved_chann = 0;
+               used_chann = 0;
+
+               /*
+                * First build a list of IO channels already assigned
+                * to this phys_id before reassigning the same IO
+                * channels to the remaining CPUs.
+                */
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = first_cpu;
+               cpup += cpu;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+                    idx++) {
+                       if (cpup->phys_id == i) {
+                               /*
+                                * Save any IO channels that are
+                                * already mapped to this phys_id.
+                                */
+                               if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+                                       chann[saved_chann] =
+                                               cpup->channel_id;
+                                       saved_chann++;
+                                       goto out;
+                               }
+
+                               /* See if we are using round-robin */
+                               if (saved_chann == 0)
+                                       saved_chann =
+                                               phba->cfg_fcp_io_channel;
+
+                               /* Associate next IO channel with CPU */
+                               cpup->channel_id = chann[used_chann];
+                               num_io_channel++;
+                               used_chann++;
+                               if (used_chann == saved_chann)
+                                       used_chann = 0;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3331 Set IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+out:
+                       cpu++;
+                       if (cpu >= phba->sli4_hba.num_present_cpu) {
+                               cpup = phba->sli4_hba.cpu_map;
+                               cpu = 0;
+                       } else {
+                               cpup++;
+                       }
+               }
+       }
+
+       if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+               cpup = phba->sli4_hba.cpu_map;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+                       if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+                               cpup->channel_id = 0;
+                               num_io_channel++;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3332 Assign IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+                       cpup++;
+               }
+       }
+
+       /* Sanity check */
+       if (num_io_channel != phba->sli4_hba.num_present_cpu)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3333 Set affinity mismatch: "
+                               "%d chann != %d cpus: %d vectors\n",
+                               num_io_channel, phba->sli4_hba.num_present_cpu,
+                               vectors);
+
+       phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+       return 1;
+}
+
+
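
The routine above walks the MSI-X vectors, picks the next unused online CPU while alternating physical packages, records the vector's IRQ and IO channel in cpu_map, and then applies the mask via irq_set_affinity_hint(). The standalone sketch below models only the spreading policy, under an assumed 8-CPU / 2-package topology; it is not the driver's bookkeeping.

#include <stdio.h>

#define NCPU 8

int main(void)
{
        int phys_of_cpu[NCPU] = { 0, 0, 0, 0, 1, 1, 1, 1 }; /* assumed topology */
        int used[NCPU] = { 0 };
        int vectors = 4, phys = 0, max_phys = 1;

        for (int v = 0; v < vectors; v++) {
                int cpu = -1;

                /* find the next free CPU on the current package, else try others */
                for (int p = 0; p <= max_phys && cpu < 0; p++) {
                        int try_phys = (phys + p) % (max_phys + 1);

                        for (int c = 0; c < NCPU; c++) {
                                if (!used[c] && phys_of_cpu[c] == try_phys) {
                                        cpu = c;
                                        break;
                                }
                        }
                }
                if (cpu < 0)
                        break;
                used[cpu] = 1;
                printf("vector %d -> cpu %d (phys %d), channel %d\n",
                       v, cpu, phys_of_cpu[cpu], v);
                phys = (phys + 1) % (max_phys + 1); /* alternate packages */
        }
        return 0;
}
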
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
                                phba->sli4_hba.msix_entries[index].vector,
                                phba->sli4_hba.msix_entries[index].entry);
 
-       /*
-        * Assign MSI-X vectors to interrupt handlers
-        */
+       /* Assign MSI-X vectors to interrupt handlers */
        for (index = 0; index < vectors; index++) {
                memset(&phba->sli4_hba.handler_name[index], 0, 16);
                sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
                                phba->cfg_fcp_io_channel, vectors);
                phba->cfg_fcp_io_channel = vectors;
        }
+
+       lpfc_sli4_set_affinity(phba, vectors);
        return rc;
 
 cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
        /* Disable interrupt and pci device */
        lpfc_sli_disable_intr(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
 static int __init
 lpfc_init(void)
 {
+       int cpu;
        int error = 0;
 
        printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
                        return -ENOMEM;
                }
        }
+
+       /* Initialize in case vector mapping is needed */
+       for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
+               lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+
        error = pci_register_driver(&lpfc_driver);
        if (error) {
                fc_release_transport(lpfc_transport_template);
index baf53e6c2bd15bfbbf8c25af5face63f1c9fe006..2a4e5d21eab2ad3c10f45dd7bece5c5a7ae02e06 100644 (file)
@@ -37,6 +37,7 @@
 #define LOG_EVENT      0x00010000      /* CT,TEMP,DUMP, logging */
 #define LOG_FIP                0x00020000      /* FIP events */
 #define LOG_FCP_UNDER  0x00040000      /* FCP underruns errors */
+#define LOG_SCSI_CMD   0x00080000      /* ALL SCSI commands */
 #define LOG_ALL_MSG    0xffffffff      /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
index a7a9fa468308bda8ddaa97d196fa23ce75133824..41363db7d42628b9af9ec5e1a1ab7f927ab6aff9 100644 (file)
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 
        /* Only FC supports upd bit */
        if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
-           (vport->fc_flag & FC_VFI_REGISTERED)) {
+           (vport->fc_flag & FC_VFI_REGISTERED) &&
+           (!phba->fc_topology_changed)) {
                bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
                bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
                        "3134 Register VFI, mydid:x%x, fcfi:%d, "
-                       " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+                       " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+                       " port_state:x%x topology chg:%d\n",
                        vport->fc_myDID,
                        phba->fcf.fcfi,
                        phba->sli4_hba.vfi_ids[vport->vfi],
                        phba->vpi_ids[vport->vpi],
-                       reg_vfi->wwn[0], reg_vfi->wwn[1]);
+                       reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+                       vport->port_state, phba->fc_topology_changed);
 }
 
 /**
index cd86069a0ba82a2954238139e8626283fac56d07..812d0cd7c86dcc8c9c3fe86f3ff6b8079ba850ab 100644 (file)
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;
 
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               /* Calculate alignment */
+               if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+                       i = phba->cfg_sg_dma_buf_size;
+               else
+                       i = SLI4_PAGE_SIZE;
+
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev,
                                phba->cfg_sg_dma_buf_size,
-                               phba->cfg_sg_dma_buf_size,
+                               i,
                                0);
-       else
+       } else {
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev, phba->cfg_sg_dma_buf_size,
                                align, 0);
+       }
+
        if (!phba->lpfc_scsi_dma_buf_pool)
                goto fail;
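
For SLI-4 the pool alignment is therefore the buffer size itself when the buffer fits within one SLI4 page, and a full page otherwise, so an SGL never straddles a page boundary. A standalone sketch of that clamp:

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096

/* Alignment chosen for the SLI-4 DMA pool: align to the whole buffer
 * when it fits in one page, otherwise to a page. */
static int pool_align(int sg_dma_buf_size)
{
        return (sg_dma_buf_size < SLI4_PAGE_SIZE) ? sg_dma_buf_size
                                                  : SLI4_PAGE_SIZE;
}

int main(void)
{
        printf("align for 2048-byte bufs: %d\n", pool_align(2048));
        printf("align for 8192-byte bufs: %d\n", pool_align(8192));
        return 0;
}
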
 
index 82f4d3542289f6a8fab4cc4f99a5b5d3c2604cf1..31e9b92f5a9bebc181e0fffec874d95c94dac6e9 100644 (file)
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* PLOGI chkparm OK */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+                        "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+                        "x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-                        ndlp->nlp_rpi);
+                        ndlp->nlp_rpi, vport->port_state,
+                        vport->fc_flag);
 
        if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
                ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
        /* 1 sec timeout */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
                        spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        }
 
        /* Put ndlp in npr state set plogi timer for 1 sec */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        if ((irsp->ulpStatus) ||
            (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
                /* 1 sec timeout */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                }
 
                /* Put ndlp in npr state set plogi timer for 1 sec */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
        if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
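
The timer changes throughout this file replace `jiffies + HZ` with `jiffies + msecs_to_jiffies(1000)`, so the one-second retry delay no longer depends on the kernel's HZ configuration. A standalone model of the conversion (round-up behaviour assumed; the real helper also handles overflow and special HZ cases):

#include <stdio.h>

static unsigned long msecs_to_jiffies_model(unsigned int ms, unsigned int hz)
{
        /* round up so the delay is never shorter than requested */
        return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
        unsigned int hzs[] = { 100, 250, 1000 };

        for (int i = 0; i < 3; i++)
                printf("HZ=%u: 1000 ms -> %lu jiffies\n",
                       hzs[i], msecs_to_jiffies_model(1000, hzs[i]));
        return 0;
}
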
index 74b8710e1e90499eadcbfc72580b12a4c223c18e..8523b278ec9daf1c2d37ffedf08335b32701cb53 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done;
+int _dump_buf_done = 1;
 
 static char *dif_op_str[] = {
        "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
+#define scsi_prot_flagged(sc, flg)     sc
+#endif
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
-       int bcnt;
+       int bcnt, bpl_size;
+
+       bpl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp), bpl_size);
 
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                             struct list_head *post_sblist, int sb_count)
 {
        struct lpfc_scsi_buf *psb, *psb_next;
-       int status;
+       int status, sgl_size;
        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
        dma_addr_t pdma_phys_bpl1;
        int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
        if (sb_count <= 0)
                return -EINVAL;
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
        list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
                list_del_init(&psb->list);
                block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                                post_cnt = block_cnt;
                        } else if (block_cnt == 1) {
                                /* last single sgl with non-contiguous xri */
-                               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                               if (sgl_size > SGL_PAGE_SIZE)
                                        pdma_phys_bpl1 = psb->dma_phys_bpl +
                                                                SGL_PAGE_SIZE;
                                else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
        int num_posted, rc = 0;
 
        /* get all SCSI buffers need to repost to a local list */
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
-       dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+       dma_addr_t pdma_phys_bpl;
        uint16_t iotag, lxri = 0;
-       int bcnt, num_posted;
+       int bcnt, num_posted, sgl_size;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(post_sblist);
        LIST_HEAD(scsi_sblist);
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp));
+
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+               /* Page alignment is CRITICAL, double check to be sure */
+               if (((unsigned long)(psb->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                                     psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
                psb->fcp_bpl = psb->data;
-               psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
-                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               psb->fcp_cmnd = (psb->data + sgl_size);
                psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
                                        sizeof(struct fcp_cmnd));
 
                /* Initialize local short-hand pointers. */
                sgl = (struct sli4_sge *)psb->fcp_bpl;
                pdma_phys_bpl = psb->dma_handle;
-               pdma_phys_fcp_cmd =
-                       (psb->dma_handle + phba->cfg_sg_dma_buf_size)
-                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
                pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
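
With the SGL carved out of the front of the buffer, each SLI-4 SCSI buffer is laid out as [SGL area][fcp_cmnd][fcp_rsp], and the buffer must start on an SLI4 page boundary or it is dropped. A standalone sketch of the offset arithmetic and the alignment test, with placeholder struct sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLI4_PAGE_SIZE 4096
#define FCP_CMND_SIZE  32   /* assumed */
#define FCP_RSP_SIZE   96   /* assumed */

int main(void)
{
        size_t buf_size = 2 * SLI4_PAGE_SIZE + FCP_CMND_SIZE + FCP_RSP_SIZE;
        size_t sgl_size = buf_size - (FCP_CMND_SIZE + FCP_RSP_SIZE);
        uint8_t *data;

        /* aligned_alloc stands in for the page-aligned DMA pool */
        data = aligned_alloc(SLI4_PAGE_SIZE, 3 * SLI4_PAGE_SIZE);
        if (!data)
                return 1;

        /* Page alignment check, as in the hunk above */
        if (((uintptr_t)data & (SLI4_PAGE_SIZE - 1)) != 0) {
                fprintf(stderr, "buffer not page aligned, would be dropped\n");
                free(data);
                return 1;
        }

        printf("sgl  at offset 0 (size %zu)\n", sgl_size);
        printf("cmnd at offset %zu\n", sgl_size);
        printf("rsp  at offset %zu\n", sgl_size + FCP_CMND_SIZE);
        free(data);
        return 0;
}
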
 
                /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                iocb->ulpLe = 1;
                iocb->ulpClass = CLASS3;
                psb->cur_iocbq.context1 = psb;
-               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-                       pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-               else
-                       pdma_phys_bpl1 = 0;
                psb->dma_phys_bpl = pdma_phys_bpl;
 
                /* add the scsi buffer to a post list */
                list_add_tail(&psb->list, &post_sblist);
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt++;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_BG,
                        "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct  lpfc_scsi_buf * lpfc_cmd = NULL;
-       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-       unsigned long iflag = 0;
-
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-       if (lpfc_cmd) {
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
+       struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+                        list);
+       if (!lpfc_cmd) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               list_remove_head(scsi_buf_list_get, lpfc_cmd,
+                                struct lpfc_scsi_buf, list);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        return  lpfc_cmd;
 }
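
The getter above shows the two-list scheme in action: allocation pulls from the "get" list, completions push to a separate "put" list, and only when "get" runs dry is "put" spliced over, so allocation and release rarely contend on the same lock. A standalone sketch of the pattern using a plain singly linked list and pthread mutexes (the driver itself uses list_head and spinlocks):

#include <pthread.h>
#include <stdio.h>

struct buf { struct buf *next; int id; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *buf_get(void)
{
        struct buf *b;

        pthread_mutex_lock(&get_lock);
        if (!get_list) {
                /* refill: splice the whole put list over */
                pthread_mutex_lock(&put_lock);
                get_list = put_list;
                put_list = NULL;
                pthread_mutex_unlock(&put_lock);
        }
        b = get_list;
        if (b)
                get_list = b->next;
        pthread_mutex_unlock(&get_lock);
        return b;
}

static void buf_put(struct buf *b)
{
        pthread_mutex_lock(&put_lock);
        b->next = put_list;
        put_list = b;
        pthread_mutex_unlock(&put_lock);
}

int main(void)
{
        struct buf bufs[3];

        for (int i = 0; i < 3; i++) {
                bufs[i].id = i;
                buf_put(&bufs[i]);
        }
        for (int i = 0; i < 3; i++) {
                struct buf *b = buf_get();

                printf("got buf %d\n", b ? b->id : -1);
        }
        return 0;
}
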
 /**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_scsi_buf *lpfc_cmd ;
-       unsigned long iflag = 0;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
        int found = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
-                                                       list) {
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_cmd->list);
                found = 1;
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
                break;
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-                                                iflag);
+       if (!found) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
+                                   list) {
+                       if (lpfc_test_rrq_active(
+                               phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+                               continue;
+                       list_del(&lpfc_cmd->list);
+                       found = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        if (!found)
                return NULL;
-       else
-               return  lpfc_cmd;
+       return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
-       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 }
 
 /**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
        if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
                spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
        } else {
-
-               spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
                psb->pCmd = NULL;
-               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+               psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        }
 }
 
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                               "dma_map_sg.  Config %d, seg_cnt %d\n",
                               __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
        bf_set(pde6_optx, pde6, txop);
        bf_set(pde6_oprx, pde6, rxop);
+
+       /*
+        * We only need to check the data on READs; for WRITEs,
+        * protection data is automatically generated, not checked.
+        */
        if (datadir == DMA_FROM_DEVICE) {
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
        }
        bf_set(pde6_ai, pde6, 1);
        bf_set(pde6_ae, pde6, 0);
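
With this change, guard and reference checking on a READ are enabled only when the midlayer actually asked for them. A standalone sketch of that decision, using assumed flag values in place of the SCSI midlayer's SCSI_PROT_GUARD_CHECK/SCSI_PROT_REF_CHECK:

#include <stdio.h>

#define GUARD_CHECK 0x1   /* assumed flag values */
#define REF_CHECK   0x2

/* Which DIF checks get enabled for a READ, given the midlayer flags */
static void setup_checks(unsigned int prot_flags, int checking)
{
        int ce = (prot_flags & GUARD_CHECK) ? checking : 0;
        int re = (prot_flags & REF_CHECK)   ? checking : 0;

        printf("flags=0x%x -> guard check=%d ref check=%d\n",
               prot_flags, ce, re);
}

int main(void)
{
        setup_checks(0, 1);
        setup_checks(GUARD_CHECK, 1);
        setup_checks(GUARD_CHECK | REF_CHECK, 1);
        return 0;
}
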
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+                       return num_bde + 3;
+
                /* setup PDE5 with what we have */
                pde5 = (struct lpfc_pde5 *) bpl;
                memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
                bf_set(pde6_optx, pde6, txop);
                bf_set(pde6_oprx, pde6, rxop);
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
+
                bf_set(pde6_ai, pde6, 1);
                bf_set(pde6_ae, pde6, 0);
                bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_bde >= phba->cfg_total_seg_cnt)
+                               return num_bde + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        struct sli4_sge_diseed *diseed = NULL;
        dma_addr_t physaddr;
        int i = 0, num_sge = 0, status;
-       int datadir = sc->sc_data_direction;
        uint32_t reftag;
        unsigned blksize;
        uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        diseed->ref_tag = cpu_to_le32(reftag);
        diseed->ref_tag_tran = diseed->ref_tag;
 
+       /*
+        * We only need to check the data on READs; for WRITEs,
+        * protection data is automatically generated, not checked.
+        */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+       }
+
        /* setup DISEED with the rest of the info */
        bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
        bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-       if (datadir == DMA_FROM_DEVICE) {
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
-       }
+
        bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
        bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+                       return num_sge + 3;
+
                /* setup DISEED with what we have */
                diseed = (struct sli4_sge_diseed *) sgl;
                memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                diseed->ref_tag = cpu_to_le32(reftag);
                diseed->ref_tag_tran = diseed->ref_tag;
 
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+               } else {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+                       /*
+                        * When in this mode, the hardware will replace
+                        * the guard tag from the host with a
+                        * newly generated good CRC for the wire.
+                        * Switch to raw mode here to avoid this
+                        * behavior. What the host sends gets put on the wire.
+                        */
+                       if (txop == BG_OP_IN_CRC_OUT_CRC) {
+                               txop = BG_OP_RAW_MODE;
+                               rxop = BG_OP_RAW_MODE;
+                       }
+               }
+
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
                /* setup DISEED with the rest of the info */
                bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
                bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+
                bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
                bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_sge >= phba->cfg_total_seg_cnt)
+                               return num_sge + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9086 BLKGRD:%s Invalid data segment\n",
@@ -2669,6 +2800,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
        return ret;
 }
 
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+                      struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+       int fcpdl;
+
+       fcpdl = scsi_bufflen(sc);
+
+       /* Check if there is protection data on the wire */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               /* Read */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+                       return fcpdl;
+
+       } else {
+               /* Write */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+                       return fcpdl;
+       }
+
+       /*
+        * If we are in DIF Type 1 mode, every data block has an 8 byte
+        * DIF (trailer) attached to it. Must adjust the FCP data length.
+        */
+       if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
+               fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+       return fcpdl;
+}
+
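
When protection data travels on the wire, every logical block carries an extra 8-byte DIF tuple, so for example a 32 KB request on 512-byte blocks grows by 64 * 8 = 512 bytes. A standalone sketch of the adjustment:

#include <stdio.h>

/* Wire-length adjustment: 8 extra bytes of DIF per logical block
 * when protection data is actually transferred. */
static int adjust_dl(int bufflen, int blksize, int pi_on_wire)
{
        if (!pi_on_wire)
                return bufflen;
        return bufflen + (bufflen / blksize) * 8;
}

int main(void)
{
        /* 32 KB I/O on 512-byte blocks: 64 blocks -> 512 extra bytes */
        printf("without PI: %d\n", adjust_dl(32768, 512, 0));
        printf("with PI:    %d\n", adjust_dl(32768, 512, 1));
        return 0;
}
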
 /**
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        uint32_t num_bde = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
-       int diflen, fcpdl;
-       unsigned blksize;
+       int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        return 1;
 
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9067 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
+
+                       /* Here we need to add a PDE5 and PDE6 to the count */
+                       if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+                               goto err;
+
                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
                                        datasegcnt);
                        /* we should have 2 or more entries in buffer list */
                        if (num_bde < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9068 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+
+                       /*
+                        * There is a minimum of 4 BPLs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 4) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
                                        datasegcnt, protsegcnt);
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_bde < 3) ||
+                           (num_bde > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9022 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
 
-       fcpdl = scsi_bufflen(scsi_cmnd);
-
-       if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
-               /*
-                * We are in DIF Type 1 mode
-                * Every data block has a 8 byte DIF (trailer)
-                * attached to it.  Must ajust FCP data length
-                */
-               blksize = lpfc_cmd_blksize(scsi_cmnd);
-               diflen = (fcpdl / blksize) * 8;
-               fcpdl += diflen;
-       }
+       fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -2812,13 +2969,233 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9023 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
+                       "9023 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
                        prot_group_type, num_bde);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the CRC algorithm
+ * via crc_t10dif.
+ */
+uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+       uint16_t crc = 0;
+       uint16_t x;
+
+       crc = crc_t10dif(data, count);
+       x = cpu_to_be16(crc);
+       return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the IP checksum algorithm
+ * via ip_compute_csum.
+ */
+uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+       uint16_t ret;
+
+       ret = ip_compute_csum(data, count);
+       return ret;
+}
+
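
Both helpers reduce a block of data to a 16-bit guard tag, with the CRC variant byte-swapped to big-endian for the wire. The standalone model below uses a bit-wise CRC16 with the T10-DIF polynomial 0x8BB7 as a stand-in for crc_t10dif(); the IP-checksum variant is omitted:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-wise CRC-16/T10-DIF: polynomial 0x8BB7, init 0, no reflection */
static uint16_t crc_t10dif_model(const uint8_t *data, size_t len)
{
        uint16_t crc = 0;

        for (size_t i = 0; i < len; i++) {
                crc ^= (uint16_t)data[i] << 8;
                for (int b = 0; b < 8; b++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;
}

int main(void)
{
        uint8_t block[512];
        uint16_t crc, wire;

        memset(block, 0xA5, sizeof(block));          /* fake data block */
        crc = crc_t10dif_model(block, sizeof(block));
        /* big-endian on the wire (byte swap on a little-endian host) */
        wire = (uint16_t)((crc >> 8) | (crc << 8));
        printf("guard tag: 0x%04x (wire order 0x%04x)\n", crc, wire);
        return 0;
}
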
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scatterlist *sgpe; /* s/g prot entry */
+       struct scatterlist *sgde; /* s/g data entry */
+       struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       struct scsi_dif_tuple *src = NULL;
+       uint8_t *data_src = NULL;
+       uint16_t guard_tag, guard_type;
+       uint16_t start_app_tag, app_tag;
+       uint32_t start_ref_tag, ref_tag;
+       int prot, protsegcnt;
+       int err_type, len, data_len;
+       int chk_ref, chk_app, chk_guard;
+       uint16_t sum;
+       unsigned blksize;
+
+       err_type = BGS_GUARD_ERR_MASK;
+       sum = 0;
+       guard_tag = 0;
+
+       /* First check to see if there is protection data to examine */
+       prot = scsi_get_prot_op(cmd);
+       if ((prot == SCSI_PROT_READ_STRIP) ||
+           (prot == SCSI_PROT_WRITE_INSERT) ||
+           (prot == SCSI_PROT_NORMAL))
+               goto out;
+
+       /* Currently the driver just supports ref_tag and guard_tag checking */
+       chk_ref = 1;
+       chk_app = 0;
+       chk_guard = 0;
+
+       /* Setup a ptr to the protection data provided by the SCSI host */
+       sgpe = scsi_prot_sglist(cmd);
+       protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+       if (sgpe && protsegcnt) {
+
+               /*
+                * We will only try to verify guard tag if the segment
+                * data length is a multiple of the blksize.
+                */
+               sgde = scsi_sglist(cmd);
+               blksize = lpfc_cmd_blksize(cmd);
+               data_src = (uint8_t *)sg_virt(sgde);
+               data_len = sgde->length;
+               if ((data_len & (blksize - 1)) == 0)
+                       chk_guard = 1;
+               guard_type = scsi_host_get_guard(cmd->device->host);
+
+               start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+               start_app_tag = src->app_tag;
+               len = sgpe->length;
+               while (src && protsegcnt) {
+                       while (len) {
+
+                               /*
+                                * First check to see if a protection data
+                                * check is valid
+                                */
+                               if ((src->ref_tag == 0xffffffff) ||
+                                   (src->app_tag == 0xffff)) {
+                                       start_ref_tag++;
+                                       goto skipit;
+                               }
+
+                               /* App Tag checking */
+                               app_tag = src->app_tag;
+                               if (chk_app && (app_tag != start_app_tag)) {
+                                       err_type = BGS_APPTAG_ERR_MASK;
+                                       goto out;
+                               }
+
+                               /* Reference Tag checking */
+                               ref_tag = be32_to_cpu(src->ref_tag);
+                               if (chk_ref && (ref_tag != start_ref_tag)) {
+                                       err_type = BGS_REFTAG_ERR_MASK;
+                                       goto out;
+                               }
+                               start_ref_tag++;
+
+                               /* Guard Tag checking */
+                               if (chk_guard) {
+                                       guard_tag = src->guard_tag;
+                                       if (guard_type == SHOST_DIX_GUARD_IP)
+                                               sum = lpfc_bg_csum(data_src,
+                                                                  blksize);
+                                       else
+                                               sum = lpfc_bg_crc(data_src,
+                                                                 blksize);
+                                       if ((guard_tag != sum)) {
+                                               err_type = BGS_GUARD_ERR_MASK;
+                                               goto out;
+                                       }
+                               }
+skipit:
+                               len -= sizeof(struct scsi_dif_tuple);
+                               if (len < 0)
+                                       len = 0;
+                               src++;
+
+                               data_src += blksize;
+                               data_len -= blksize;
+
+                               /*
+                                * Are we at the end of the Data segment?
+                                * The data segment is only used for Guard
+                                * tag checking.
+                                */
+                               if (chk_guard && (data_len == 0)) {
+                                       chk_guard = 0;
+                                       sgde = sg_next(sgde);
+                                       if (!sgde)
+                                               goto out;
+
+                                       data_src = (uint8_t *)sg_virt(sgde);
+                                       data_len = sgde->length;
+                                       if ((data_len & (blksize - 1)) == 0)
+                                               chk_guard = 1;
+                               }
+                       }
+
+                       /* Go to the next protection data segment */
+                       sgpe = sg_next(sgpe);
+                       if (sgpe) {
+                               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+                               len = sgpe->length;
+                       } else {
+                               src = NULL;
+                       }
+                       protsegcnt--;
+               }
+       }
+out:
+       if (err_type == BGS_GUARD_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x1);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+               phba->bg_guard_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               sum, guard_tag);
+
+       } else if (err_type == BGS_REFTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x3);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_reftag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               ref_tag, start_ref_tag);
+
+       } else if (err_type == BGS_APPTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x2);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_apptag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               app_tag, start_app_tag);
+       }
+}
+
+
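For reference, the 8-byte protection tuple that lpfc_calc_bg_err() walks, one per logical block; a sketch with field names matching the scsi_dif_tuple structure this file dereferences (layout per the T10 DIF specification, all fields big-endian on the wire):

	struct scsi_dif_tuple {
		__be16 guard_tag;	/* CRC or IP checksum of the data block */
		__be16 app_tag;		/* application tag; 0xffff disables checking */
		__be32 ref_tag;		/* low 32 bits of the LBA; 0xffffffff disables checking */
	};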
 /*
  * This function checks for BlockGuard errors detected by
  * the HBA.  In case of errors, the ASC/ASCQ fields in the
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
-                       " 0x%x lba 0x%llx blk cnt 0x%x "
-                       "bgstat=0x%x bghm=0x%x\n",
-                       cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       blk_rq_sectors(cmd->request), bgstat, bghm);
-
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (lpfc_bgs_get_invalid_prof(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
-                       " BlockGuard profile. bgstat:0x%x\n",
-                       bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9072 BLKGRD: Invalid BG Profile in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
 
        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
-                               "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
-                               bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9073 BLKGRD: Invalid BG PDIF Block in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                cmd->result = DRIVER_SENSE << 24
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
                phba->bg_guard_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9055 BLKGRD: guard_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9055 BLKGRD: Guard Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_reftag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9056 BLKGRD: ref_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9056 BLKGRD: Ref Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_apptag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9061 BLKGRD: app_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9061 BLKGRD: App Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (!ret) {
                /* No error was reported - problem in FW? */
-               cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9057 BLKGRD: Unknown error reported!\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9057 BLKGRD: Unknown error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
+
+               /* Calculate what type of error it was */
+               lpfc_calc_bg_err(phba, lpfc_cmd);
        }
-
 out:
        return ret;
 }
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                                "dma_map_sg.  Config %d, seg_cnt %d\n",
                                __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -3093,45 +3488,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
        return 0;
 }
 
-/**
- * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
- * @phba: The Hba for which this call is being executed.
- * @lpfc_cmd: The scsi buffer which is going to be adjusted.
- *
- * Adjust the data length to account for how much data
- * is actually on the wire.
- *
- * returns the adjusted data length
- **/
-static int
-lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-               struct lpfc_scsi_buf *lpfc_cmd)
-{
-       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
-       int diflen, fcpdl;
-       unsigned blksize;
-
-       fcpdl = scsi_bufflen(sc);
-
-       /* Check if there is protection data on the wire */
-       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-               /* Read */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
-                       return fcpdl;
-
-       } else {
-               /* Write */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
-                       return fcpdl;
-       }
-
-       /* If protection data on the wire, adjust the count accordingly */
-       blksize = lpfc_cmd_blksize(sc);
-       diflen = (fcpdl / blksize) * 8;
-       fcpdl += diflen;
-       return fcpdl;
-}
-
 /**
  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
-       uint32_t num_bde = 0;
+       uint32_t num_sge = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
        int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
-        *  fcp_rsp regions to the first data bde entry
+        *  fcp_rsp regions to the first data sge entry
         */
        if (scsi_sg_count(scsi_cmnd)) {
                /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
                sgl += 1;
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9087 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
-                       num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+                       /* Here we need to add a DISEED to the count */
+                       if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+                               goto err;
+
+                       num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
                                        datasegcnt);
+
                        /* we should have 2 or more entries in buffer list */
-                       if (num_bde < 2)
+                       if (num_sge < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9088 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+                       /*
+                        * There is a minimum of 3 SGEs used for every
+                        * protection data segment (see the worked example
+                        * after this hunk).
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 3) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
-                       num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+                       num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
                                        datasegcnt, protsegcnt);
+
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_sge < 3) ||
+                           (num_sge > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9083 Unexpected protection group %i\n",
                                        prot_group_type);
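A worked example of the SGE budget check in the LPFC_PG_TYPE_DIF_BUF case above (66 is an arbitrary illustration value for cfg_total_seg_cnt, not a driver default):

	/*
	 * With phba->cfg_total_seg_cnt == 66 the test
	 *         (lpfc_cmd->prot_seg_cnt * 3) > (66 - 2)
	 * passes for at most 21 protection data segments (21 * 3 = 63 <= 64),
	 * since each such segment needs a minimum of 3 SGEs.
	 */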
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        }
 
        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9084 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
-                       prot_group_type, num_bde);
+                       "9084 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+                       prot_group_type, num_sge);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9033 BLKGRD: rcvd %s cmd:x%x "
                                         "sector x%llx cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
                                         "x%x sector x%llx cnt %u pt %x\n",
                                         cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        /* Wait for abort to complete */
        wait_event_timeout(waitq,
                          (lpfc_cmd->pCmd != cmnd),
-                          (2*vport->cfg_devloss_tmo*HZ));
+                          msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
        lpfc_cmd->waitq = NULL;
 
        if (lpfc_cmd->pCmd == cmnd) {
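The HZ-based timeouts replaced in this and the following hunks all use the same conversion; a minimal sketch (cfg_devloss_tmo is a value in seconds):

	/* Go through milliseconds so the jiffies count is correct for any
	 * CONFIG_HZ, instead of hard-coding a multiply by HZ. */
	unsigned long tmo = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000);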
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_hba *phba = vport->phba;
        int rc, ret = SUCCESS;
 
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                        "3172 SCSI layer issued Host Reset\n");
+
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc)
                ret = FAILED;
-       lpfc_online(phba);
+       rc = lpfc_online(phba);
+       if (rc)
+               ret = FAILED;
        lpfc_unblock_mgmt_io(phba);
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+       if (ret == FAILED) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                "3323 Failed host reset, bring it offline\n");
+               lpfc_sli4_offline_eratt(phba);
+       }
        return ret;
 }
 
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
        }
        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_to_alloc != num_allocated) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-                                "0708 Allocation request of %d "
-                                "command buffers did not succeed.  "
-                                "Allocated %d buffers.\n",
-                                num_to_alloc, num_allocated);
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                        "0708 Allocation request of %d "
+                                        "command buffers did not succeed.  "
+                                        "Allocated %d buffers.\n",
+                                        num_to_alloc, num_allocated);
        }
        if (num_allocated > 0)
                phba->total_scsi_bufs += num_allocated;
index 35dd17eb0f27907f67f4c50968a95452498b93be..572579f87de4fb1c3f74268da15ff8d226384103 100644 (file)
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov + 1);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov * 2);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
        list_splice_init(&phba->active_rrq_list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
-       rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+       rrq->rrq_stop_time = jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
-       else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
-                       (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+       else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
-                                 jiffies + HZ * (phba->fc_ratov << 1));
+                               jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }
 
 
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
-                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                               "x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
-                               pmbox->un.varWords[7]);
+                               pmbox->un.varWords[7],
+                               pmbox->un.varWords[8],
+                               pmbox->un.varWords[9],
+                               pmbox->un.varWords[10]);
 
                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
                lpfc_worker_wake_up(phba);
        else
                /* Restart the timer for next eratt poll */
-               mod_timer(&phba->eratt_poll, jiffies +
-                                       HZ * LPFC_ERATT_POLL_INTERVAL);
+               mod_timer(&phba->eratt_poll,
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
        return;
 }
 
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
        } else {
                kfree(phba->vpi_bmask);
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                kfree(phba->vpi_ids);
                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        struct lpfc_sglq *sglq_entry = NULL;
        struct lpfc_sglq *sglq_entry_next = NULL;
        struct lpfc_sglq *sglq_entry_first = NULL;
-       int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+       int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
        int last_xritag = NO_XRI;
        LIST_HEAD(prep_sgl_list);
        LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
        spin_unlock_irq(&phba->hbalock);
 
+       total_cnt = phba->sli4_hba.els_xri_cnt;
        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                                 &allc_sgl_list, list) {
                list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                                sglq_entry->sli4_xritag);
                                        list_add_tail(&sglq_entry->list,
                                                      &free_sgl_list);
-                                       spin_lock_irq(&phba->hbalock);
-                                       phba->sli4_hba.els_xri_cnt--;
-                                       spin_unlock_irq(&phba->hbalock);
+                                       total_cnt--;
                                }
                        }
                }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                        (sglq_entry_first->sli4_xritag +
                                         post_cnt - 1));
                        list_splice_init(&blck_sgl_list, &free_sgl_list);
-                       spin_lock_irq(&phba->hbalock);
-                       phba->sli4_hba.els_xri_cnt -= post_cnt;
-                       spin_unlock_irq(&phba->hbalock);
+                       total_cnt -= post_cnt;
                }
 
                /* don't reset xirtag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                /* reset els sgl post count for next round of posting */
                post_cnt = 0;
        }
+       /* update the number of XRIs posted for ELS */
+       phba->sli4_hba.els_xri_cnt = total_cnt;
 
        /* free the els sgls failed to post */
        lpfc_free_sgl_list(phba, &free_sgl_list);
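The els_xri_cnt handling above swaps per-item locking for a snapshot that is published once; in outline:

	/*
	 *   total_cnt = phba->sli4_hba.els_xri_cnt;    // snapshot the count
	 *   ...
	 *   total_cnt--;                // one sgl failed to post
	 *   total_cnt -= post_cnt;      // a whole block failed to post
	 *   ...
	 *   phba->sli4_hba.els_xri_cnt = total_cnt;    // publish the result once
	 */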
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
        /* Start the ELS watchdog timer */
        mod_timer(&vport->els_tmofunc,
-                 jiffies + HZ * (phba->fc_ratov * 2));
+                 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
 
        /* Start heart beat timer */
        mod_timer(&phba->hb_tmofunc,
-                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
 
        /* Start error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        goto out_not_finished;
                }
                /* timeout active mbox command */
-               mod_timer(&psli->mbox_tmo, (jiffies +
-                              (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+               timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+                                          1000);
+               mod_timer(&psli->mbox_tmo, jiffies + timeout);
        }
 
        /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
        /* Start timer for the mbox_tmo and log some mailbox post messages */
        mod_timer(&psli->mbox_tmo, (jiffies +
-                 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+                 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-       int i;
-
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
-               i = smp_processor_id();
-       else
-               i = atomic_add_return(1, &phba->fcp_qidx);
+       struct lpfc_vector_map_info *cpup;
+       int chann, cpu;
 
-       i = (i % phba->cfg_fcp_io_channel);
-       return i;
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+               cpu = smp_processor_id();
+               if (cpu < phba->sli4_hba.num_present_cpu) {
+                       cpup = phba->sli4_hba.cpu_map;
+                       cpup += cpu;
+                       return cpup->channel_id;
+               }
+               chann = cpu;
+       }
+       chann = atomic_add_return(1, &phba->fcp_qidx);
+       chann = (chann % phba->cfg_fcp_io_channel);
+       return chann;
 }
 
 /**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
                (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+               if (unlikely(!phba->sli4_hba.fcp_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
        } else {
+               if (unlikely(!phba->sli4_hba.els_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
                        return IOCB_ERROR;
        }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
-               timeout_req = timeout * HZ;
+               timeout_req = msecs_to_jiffies(timeout * 1000);
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
-                               timeout * HZ);
+                               msecs_to_jiffies(timeout * 1000));
 
                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                }
                wq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3264 WQ[%d]: barset:x%x, offset:x%x\n",
-                               wq->queue_id, pci_barset, db_offset);
+                               "3264 WQ[%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", wq->queue_id, pci_barset,
+                               db_offset, wq->db_format);
        } else {
                wq->db_format = LPFC_DB_LIST_FORMAT;
                wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                }
                hrq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
-                               hrq->queue_id, pci_barset, db_offset);
+                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", hrq->queue_id, pci_barset,
+                               db_offset, hrq->db_format);
        } else {
                hrq->db_format = LPFC_DB_RING_FORMAT;
                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        }
 
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "2538 Received frame rctl:%s type:%s "
-                       "Frame Data:%08x %08x %08x %08x %08x %08x\n",
-                       rctl_names[fc_hdr->fh_r_ctl],
-                       type_names[fc_hdr->fh_type],
+                       "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+                       "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+                       type_names[fc_hdr->fh_type], fc_hdr->fh_type,
                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+                       be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
index be02b59ea2797a53972a1795dcd4b0b1c156a846..67af460184badc9a2104cd477e099aeb8112a573 100644 (file)
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
-#define LPFC_SLI4_FL1_MAX_BUF_SIZE     0X2000
-#define LPFC_SLI4_MIN_BUF_SIZE         0x400
-#define LPFC_SLI4_MAX_BUF_SIZE         0x20000
-
 /*
  * SLI4 specific data structures
  */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
 
 #define LPFC_SLI4_HANDLER_NAME_SZ      16
 
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+       uint16_t        phys_id;
+       uint16_t        core_id;
+       uint16_t        irq;
+       uint16_t        channel_id;
+       struct cpumask  maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY  0xffff
+#define LPFC_MAX_CPU           256
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
        struct lpfc_iov iov;
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+       /* CPU to vector mapping information */
+       struct lpfc_vector_map_info *cpu_map;
+       uint16_t num_online_cpu;
+       uint16_t num_present_cpu;
 };
 
 enum lpfc_sge_type {
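How the new per-CPU map is consumed by the work-queue selection changed earlier in this patch (sketch of the lookup only):

	/*
	 *   cpu = smp_processor_id();
	 *   if (cpu < phba->sli4_hba.num_present_cpu)
	 *           return phba->sli4_hba.cpu_map[cpu].channel_id;
	 *   // otherwise fall back to the round-robin fcp_qidx counter
	 */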
index 664cd04f7cd8091dcaf0396f889b0d0852a4e6f8..a38dc3b169697ee2915cee01158334a240eb51b1 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.38"
+#define LPFC_DRIVER_VERSION "8.3.39"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 0fe188e66000da063c01feec76b557a075545258..e28e431564b08f5b90693c3dc49c5d218e67a262 100644 (file)
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
        }
 }
 
-static int
+int
 lpfc_alloc_vpi(struct lpfc_hba *phba)
 {
        unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba   *phba = vport->phba;
        long timeout;
+       bool ns_ndlp_referenced = false;
 
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 
        lpfc_debugfs_terminate(vport);
 
+       /*
+        * The call to fc_remove_host might release the NameServer ndlp. Since
+        * we might need to use the ndlp to send the DA_ID CT command,
+        * increment the reference for the NameServer ndlp to prevent it from
+        * being released.
+        */
+       ndlp = lpfc_findnode_did(vport, NameServer_DID);
+       if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+               lpfc_nlp_get(ndlp);
+               ns_ndlp_referenced = true;
+       }
+
        /* Remove FC host and then SCSI host with the vport */
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
                lpfc_discovery_wait(vport);
 
 skip_logo:
+
+       /*
+        * If the NameServer ndlp has been incremented to allow the DA_ID CT
+        * command to be sent, decrement the ndlp now.
+        */
+       if (ns_ndlp_referenced) {
+               ndlp = lpfc_findnode_did(vport, NameServer_DID);
+               lpfc_nlp_put(ndlp);
+       }
+
        lpfc_cleanup(vport);
        lpfc_sli_host_down(vport);
 
index 90828340aceadbd275c7184b8ad465582e894e2a..6b2c94eb8134300e72b4d06571994c0c2fe5d32b 100644 (file)
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
 struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
 void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
 
 /*
  *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
index 7c90d57b867e2c332e1a953a3c01892f971bd779..3a9ddae86f1f8e2d5e33474a7f0f3510ec413c65 100644 (file)
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                printk(KERN_ERR "megaraid_sas: timed out while"
                        "waiting for HBA to recover\n");
                error = -ENODEV;
-               goto out_kfree_ioc;
+               goto out_up;
        }
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 
        error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+      out_up:
        up(&instance->ioctl_sem);
 
       out_kfree_ioc:
index 74550922ad55e418eb7e37753e0983023728943b..7b7381d7671fdc9cfeecdf3c256984b0261d3116 100644 (file)
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
-               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
                init_timer(&mvi->devices[i].timer);
index 532110f4562af58b7517b7b72bfaf0d0f5e25653..c9e244984e30ec1dfed39c7b574b71caffeb0794 100644 (file)
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
        return 0;
 }
 
-#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
                                struct mvs_tmf_task *tmf, int *pass)
 {
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return rc;
        }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
                        phy->identify.device_type =
                                phy->att_dev_info & PORT_DEV_TYPE_MASK;
 
-                       if (phy->identify.device_type == SAS_END_DEV)
+                       if (phy->identify.device_type == SAS_END_DEVICE)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SSP;
-                       else if (phy->identify.device_type != NO_DEVICE)
+                       else if (phy->identify.device_type != SAS_PHY_UNUSED)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SMP;
                        if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 {
        u32 dev;
        for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
-               if (mvi->devices[dev].dev_type == NO_DEVICE) {
+               if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        mvi->devices[dev].device_id = dev;
                        return &mvi->devices[dev];
                }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
        u32 id = mvi_dev->device_id;
        memset(mvi_dev, 0, sizeof(*mvi_dev));
        mvi_dev->device_id = id;
-       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->dev_type = SAS_PHY_UNUSED;
        mvi_dev->dev_status = MVS_DEV_NORMAL;
        mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
        int rc;
        struct sas_phy *phy = sas_get_local_phy(dev);
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                        (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
        rc = sas_phy_reset(phy, reset_type);
        sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
 
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                task->task_proto & SAS_PROTOCOL_STP) {
-               if (SATA_DEV == dev->dev_type) {
+               if (SAS_SATA_DEV == dev->dev_type) {
                        struct mvs_slot_info *slot = task->lldd_task;
                        u32 slot_idx = (u32)(slot - mvi->slot_info);
                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
index 9f3cc13a5ce7ef70ca9c59c29530a8481fd679fc..60e2fb7f2dca7e37512b128bea9b59fd1c9cd579 100644 (file)
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
 extern struct kmem_cache *mvs_task_list_cache;
 
 #define DEV_IS_EXPANDER(type)  \
-       ((type == EDGE_DEV) || (type == FANOUT_DEV))
+       ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define bit(n) ((u64)1 << n)
 
@@ -241,7 +241,7 @@ struct mvs_phy {
 
 struct mvs_device {
        struct list_head                dev_entry;
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        struct mvs_info *mvi_info;
        struct domain_device *sas_device;
        struct timer_list timer;
index 52f04296171c3e1c3dc0963f6e5e8a6b3037f92f..ce4cd87c7c662a6de92f71331e1e49052f4c8e4f 100644 (file)
@@ -4,9 +4,10 @@
 # Copyright (C) 2008-2009  USI Co., Ltd.
 
 
-obj-$(CONFIG_SCSI_PM8001) += pm8001.o
-pm8001-y += pm8001_init.o \
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
                pm8001_sas.o  \
                pm8001_ctl.o  \
-               pm8001_hwi.o
+               pm8001_hwi.o  \
+               pm80xx_hwi.o
 
index 45bc197bc22f2a45c46f5176cabefddc69e077f2..d99f41c2ca13b927c66d34036937d3443b29f862 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-               pm8001_ha->main_cfg_tbl.interface_rev);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+       }
 }
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+       }
 }
 static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
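Each sysfs attribute below repeats the same chip dispatch; a condensed sketch (the field name is a placeholder):

	/*
	 *   if (pm8001_ha->chip_id == chip_8001)
	 *           val = pm8001_ha->main_cfg_tbl.pm8001_tbl.<field>;  // 8001 (SPC)
	 *   else
	 *           val = pm8001_ha->main_cfg_tbl.pm80xx_tbl.<field>;  // 80xx family
	 */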
 /**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       pm8001_ha->main_cfg_tbl.max_out_io);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+       }
 }
 static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
 /**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+                       );
+       }
 }
 static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
 /**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+                       );
+       }
 }
 static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
 
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-       mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+       /* fe000000 means supports SAS2.1 */
+       if (pm8001_ha->chip_id == chip_8001)
+               mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
+       else
+               mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
        return show_sas_spec_support_status(mode, buf);
 }
 static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
                goto out;
        }
        payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
-       memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+       memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
                                pm8001_ha->fw_image->size);
        payload->length = pm8001_ha->fw_image->size;
        payload->id = 0;
+       payload->minor_function = 0x1;
        pm8001_ha->nvmd_completion = &completion;
        ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
        wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
                        payload->length = 1024*16;
                        payload->id = 0;
                        fwControl =
-                             (struct fw_control_info *)payload->func_specific;
+                             (struct fw_control_info *)&payload->func_specific;
                        fwControl->len = IOCTL_BUF_SIZE;   /* IN */
                        fwControl->size = partitionSize + HEADER_LEN;/* IN */
                        fwControl->retcode = 0;/* OUT */
index c3d20c8d4abe3724308182697a558dac7440a07a..479c5a7a863a8aa8fded8e4164b8377d3658089d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 enum chip_flavors {
        chip_8001,
+       chip_8008,
+       chip_8009,
+       chip_8018,
+       chip_8019
 };
-#define USI_MAX_MEMCNT                 9
-#define PM8001_MAX_DMA_SG              SG_ALL
+
 enum phy_speed {
        PHY_SPEED_15 = 0x01,
        PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
 #define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define        PM8001_MAX_INB_NUM       1
 #define        PM8001_MAX_OUTB_NUM      1
+#define        PM8001_MAX_SPCV_INB_NUM         1
+#define        PM8001_MAX_SPCV_OUTB_NUM        4
 #define        PM8001_CAN_QUEUE         508    /* SCSI Queue depth */
 
+/* Inbound/Outbound IOMB (queue element) size in bytes */
+#define IOMB_SIZE_SPC          64
+#define IOMB_SIZE_SPCV         128
+
 /* unchangeable hardware details */
-#define        PM8001_MAX_PHYS          8      /* max. possible phys */
-#define        PM8001_MAX_PORTS         8      /* max. possible ports */
-#define        PM8001_MAX_DEVICES       1024   /* max supported device */
+#define        PM8001_MAX_PHYS          16     /* max. possible phys */
+#define        PM8001_MAX_PORTS         16     /* max. possible ports */
+#define        PM8001_MAX_DEVICES       2048   /* max supported device */
+#define        PM8001_MAX_MSIX_VEC      64     /* max msi-x int for spcv/ve */
 
+#define USI_MAX_MEMCNT_BASE    5
+#define IB                     (USI_MAX_MEMCNT_BASE + 1)
+#define CI                     (IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB                     (CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI                     (OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT         (PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG      SG_ALL
 enum memory_region_num {
        AAP1 = 0x0, /* application acceleration processor */
        IOP,        /* IO processor */
-       CI,         /* consumer index */
-       PI,         /* producer index */
-       IB,         /* inbound queue */
-       OB,         /* outbound queue */
        NVMD,       /* NVM device */
        DEV_MEM,    /* memory for devices */
        CCB_MEM,    /* memory for command control block */
+       FW_FLASH    /* memory for fw flash update */
 };
 #define        PM8001_EVENT_LOG_SIZE    (128 * 1024)
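Worked through with the SPCv queue counts defined above (PM8001_MAX_SPCV_INB_NUM = 1, PM8001_MAX_SPCV_OUTB_NUM = 4), the region-index macros expand to fixed values; the six named regions in enum memory_region_num take indices 0-5 and the per-queue regions follow:

	/*
	 * IB             = USI_MAX_MEMCNT_BASE + 1 = 6   (inbound queue region)
	 * CI             = IB + 1                  = 7   (inbound consumer index)
	 * OB             = CI + 1                  = 8   (outbound queues 8..11)
	 * PI             = OB + 4                  = 12  (producer indexes 12..15)
	 * USI_MAX_MEMCNT = PI + 4                  = 16  memory regions in total
	 *
	 * init_default_table_values() below then indexes region[IB + i],
	 * region[CI + i], region[OB + i] and region[PI + i] per queue.
	 */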
 
index b8dd05074abb4f61faebbe0af12bd3074fd8b930..69dd49c05f1e1069b2aea0544734f52ea5126abb 100644 (file)
 static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
-       pm8001_ha->main_cfg_tbl.signature       = pm8001_mr32(address, 0x00);
-       pm8001_ha->main_cfg_tbl.interface_rev   = pm8001_mr32(address, 0x04);
-       pm8001_ha->main_cfg_tbl.firmware_rev    = pm8001_mr32(address, 0x08);
-       pm8001_ha->main_cfg_tbl.max_out_io      = pm8001_mr32(address, 0x0C);
-       pm8001_ha->main_cfg_tbl.max_sgl         = pm8001_mr32(address, 0x10);
-       pm8001_ha->main_cfg_tbl.ctrl_cap_flag   = pm8001_mr32(address, 0x14);
-       pm8001_ha->main_cfg_tbl.gst_offset      = pm8001_mr32(address, 0x18);
-       pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.signature    =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io   =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl      =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset   =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
                pm8001_mr32(address, MAIN_IBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
                pm8001_mr32(address, MAIN_OBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.hda_mode_flag   =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag        =
                pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
 
        /* read analog Setting offset from the configuration table */
-       pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
                pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
 
        /* read Error Dump Offset and Length */
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
 }
 
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->general_stat_tbl_addr;
-       pm8001_ha->gs_tbl.gst_len_mpistate      = pm8001_mr32(address, 0x00);
-       pm8001_ha->gs_tbl.iq_freeze_state0      = pm8001_mr32(address, 0x04);
-       pm8001_ha->gs_tbl.iq_freeze_state1      = pm8001_mr32(address, 0x08);
-       pm8001_ha->gs_tbl.msgu_tcnt             = pm8001_mr32(address, 0x0C);
-       pm8001_ha->gs_tbl.iop_tcnt              = pm8001_mr32(address, 0x10);
-       pm8001_ha->gs_tbl.reserved              = pm8001_mr32(address, 0x14);
-       pm8001_ha->gs_tbl.phy_state[0]  = pm8001_mr32(address, 0x18);
-       pm8001_ha->gs_tbl.phy_state[1]  = pm8001_mr32(address, 0x1C);
-       pm8001_ha->gs_tbl.phy_state[2]  = pm8001_mr32(address, 0x20);
-       pm8001_ha->gs_tbl.phy_state[3]  = pm8001_mr32(address, 0x24);
-       pm8001_ha->gs_tbl.phy_state[4]  = pm8001_mr32(address, 0x28);
-       pm8001_ha->gs_tbl.phy_state[5]  = pm8001_mr32(address, 0x2C);
-       pm8001_ha->gs_tbl.phy_state[6]  = pm8001_mr32(address, 0x30);
-       pm8001_ha->gs_tbl.phy_state[7]  = pm8001_mr32(address, 0x34);
-       pm8001_ha->gs_tbl.reserved1             = pm8001_mr32(address, 0x38);
-       pm8001_ha->gs_tbl.reserved2             = pm8001_mr32(address, 0x3C);
-       pm8001_ha->gs_tbl.reserved3             = pm8001_mr32(address, 0x40);
-       pm8001_ha->gs_tbl.recover_err_info[0]   = pm8001_mr32(address, 0x44);
-       pm8001_ha->gs_tbl.recover_err_info[1]   = pm8001_mr32(address, 0x48);
-       pm8001_ha->gs_tbl.recover_err_info[2]   = pm8001_mr32(address, 0x4C);
-       pm8001_ha->gs_tbl.recover_err_info[3]   = pm8001_mr32(address, 0x50);
-       pm8001_ha->gs_tbl.recover_err_info[4]   = pm8001_mr32(address, 0x54);
-       pm8001_ha->gs_tbl.recover_err_info[5]   = pm8001_mr32(address, 0x58);
-       pm8001_ha->gs_tbl.recover_err_info[6]   = pm8001_mr32(address, 0x5C);
-       pm8001_ha->gs_tbl.recover_err_info[7]   = pm8001_mr32(address, 0x60);
+       pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate   =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0   =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1   =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt          =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt           =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd               =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0]       =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1]       =
+                               pm8001_mr32(address, 0x1C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2]       =
+                               pm8001_mr32(address, 0x20);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3]       =
+                               pm8001_mr32(address, 0x24);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4]       =
+                               pm8001_mr32(address, 0x28);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5]       =
+                               pm8001_mr32(address, 0x2C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6]       =
+                               pm8001_mr32(address, 0x30);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7]       =
+                               pm8001_mr32(address, 0x34);
+       pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val     =
+                               pm8001_mr32(address, 0x38);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0]           =
+                               pm8001_mr32(address, 0x3C);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1]           =
+                               pm8001_mr32(address, 0x40);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0]        =
+                               pm8001_mr32(address, 0x44);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1]        =
+                               pm8001_mr32(address, 0x48);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2]        =
+                               pm8001_mr32(address, 0x4C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3]        =
+                               pm8001_mr32(address, 0x50);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4]        =
+                               pm8001_mr32(address, 0x54);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5]        =
+                               pm8001_mr32(address, 0x58);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6]        =
+                               pm8001_mr32(address, 0x5C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7]        =
+                               pm8001_mr32(address, 0x60);
 }
 
 /**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int inbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
-       for (i = 0; i < inbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                u32 offset = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int outbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
-       for (i = 0; i < outbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                u32 offset = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 {
-       int qn = 1;
        int i;
        u32 offsetib, offsetob;
        void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
        void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
 
-       pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd                     = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7   = 0;
-
-       pm8001_ha->main_cfg_tbl.upper_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd          = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
+
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.event_log_size  = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.event_log_option                = 0x01;
-       pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size               =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option             = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.iop_event_log_size      = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.iop_event_log_option            = 0x01;
-       pm8001_ha->main_cfg_tbl.fatal_err_interrupt             = 0x01;
-       for (i = 0; i < qn; i++) {
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size           =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
-                       pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
-               pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].base_virt             =
-                       (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
                pm8001_ha->inbnd_q_tbl[i].total_length          =
-                       pm8001_ha->memoryMap.region[IB].total_len;
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
                pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].ci_virt               =
-                       pm8001_ha->memoryMap.region[CI].virt_ptr;
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
                offsetib = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
                        get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < qn; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].base_virt            =
-                       (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
                pm8001_ha->outbnd_q_tbl[i].total_length         =
-                       pm8001_ha->memoryMap.region[OB].total_len;
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
                pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay       =
-                       0 | (10 << 16) | (0 << 24);
+                       0 | (10 << 16) | (i << 24);
                pm8001_ha->outbnd_q_tbl[i].pi_virt              =
-                       pm8001_ha->memoryMap.region[PI].virt_ptr;
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
                offsetob = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
                        get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
        pm8001_mw32(address, 0x24,
-               pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
        pm8001_mw32(address, 0x28,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
        pm8001_mw32(address, 0x2C,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
        pm8001_mw32(address, 0x30,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
        pm8001_mw32(address, 0x34,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
        pm8001_mw32(address, 0x38,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid0_3);
        pm8001_mw32(address, 0x3C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid4_7);
        pm8001_mw32(address, 0x40,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid0_3);
        pm8001_mw32(address, 0x44,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid4_7);
        pm8001_mw32(address, 0x48,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid0_3);
        pm8001_mw32(address, 0x4C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid4_7);
        pm8001_mw32(address, 0x50,
-               pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
        pm8001_mw32(address, 0x54,
-               pm8001_ha->main_cfg_tbl.lower_event_log_addr);
-       pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
-       pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+       pm8001_mw32(address, 0x58,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+       pm8001_mw32(address, 0x5C,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
        pm8001_mw32(address, 0x60,
-               pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
        pm8001_mw32(address, 0x64,
-               pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
-       pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+       pm8001_mw32(address, 0x68,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
        pm8001_mw32(address, 0x6C,
-               pm8001_ha->main_cfg_tbl.iop_event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
        pm8001_mw32(address, 0x70,
-               pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
 }
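The three helpers above implement a shadow-copy round trip: read_main_config_table() latches the MMIO table into main_cfg_tbl, init_default_table_values() edits that shadow, and update_main_config_table() writes the edited fields back with pm8001_mw32(). A minimal sketch for one field (event log size at SPC offset 0x58), with a hypothetical helper name:

	static inline void set_event_log_size_sketch(struct pm8001_hba_info *pm8001_ha)
	{
		void __iomem *addr = pm8001_ha->main_cfg_tbl_addr;

		/* edit the shadow copy, then push it to the MMIO table */
		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = PM8001_EVENT_LOG_SIZE;
		pm8001_mw32(addr, 0x58,
			    pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
	}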
 
 /**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
  */
 static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 {
+       u8 i = 0;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       /* 8081 controllers need BAR shift to access MPI space
+       * as this is shared with BIOS data */
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        /* check the firmware status */
        if (-1 == check_fw_ready(pm8001_ha)) {
                PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       update_inbnd_queue_table(pm8001_ha, 0);
-       update_outbnd_queue_table(pm8001_ha, 0);
-       mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-       /* 7->130ms, 34->500ms, 119->1.5s */
-       mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+       /* 8081 controllers do not require these operations */
+       if (deviceid != 0x8081) {
+               mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+               /* 7->130ms, 34->500ms, 119->1.5s */
+               mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       }
        /* notify firmware update finished and check initialization status */
        if (0 == mpi_init_check(pm8001_ha)) {
                PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
        u32 max_wait_count;
        u32 value;
        u32 gst_len_mpistate;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        init_pci_device_addresses(pm8001_ha);
        /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
        table is stop */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
  * the FW register status to the originated status.
  * @pm8001_ha: our hba card information
- * @signature: signature in host scratch pad0 register.
  */
 static int
-pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
        u32     regVal, toggleVal;
        u32     max_wait_count;
        u32     regVal1, regVal2, regVal3;
+       u32     signature = 0x252acbcd; /* for host scratch pad0 */
        unsigned long flags;
 
        /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_iounmap - which maped when initialized.
  * @pm8001_ha: our hba card information
  */
-static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
        s8 bar, logical = 0;
        for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
+ * pm8001_mpi_msg_free_get - get a free message buffer for transfer to the
+ * inbound queue.
  * @circularQ: the inbound queue  we want to transfer to HBA.
  * @messageSize: the message size of this transfer, normally it is 64 bytes
  * @messagePtr: the pointer to message.
  */
-static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
                            u16 messageSize, void **messagePtr)
 {
        u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
        u8 bcCount = 1; /* only support single buffer */
 
        /* Checks is the requested message size can be allocated in this queue*/
-       if (messageSize > 64) {
+       if (messageSize > IOMB_SIZE_SPCV) {
                *messagePtr = NULL;
                return -1;
        }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
                return -1;
        }
        /* get memory IOMB buffer address */
-       offset = circularQ->producer_idx * 64;
+       offset = circularQ->producer_idx * messageSize;
        /* increment to next bcCount element */
        circularQ->producer_idx = (circularQ->producer_idx + bcCount)
                                % PM8001_MPI_QUEUE;
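The slot lookup above is plain ring-buffer arithmetic: the free element sits producer_idx * messageSize bytes into the queue (the IOMB size is now 64 bytes on the SPC and 128 on the SPCv, per IOMB_SIZE_SPC/IOMB_SIZE_SPCV), and the producer index wraps modulo PM8001_MPI_QUEUE. A stand-alone sketch with a hypothetical helper name:

	static inline void *iomb_slot_sketch(struct inbound_queue_table *circularQ,
					     u32 iomb_size)
	{
		/* byte offset of the next free IOMB slot */
		void *slot = circularQ->base_virt +
				circularQ->producer_idx * iomb_size;

		/* advance and wrap the producer index */
		circularQ->producer_idx =
			(circularQ->producer_idx + 1) % PM8001_MPI_QUEUE;
		return slot;
	}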
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 }
 
 /**
- * mpi_build_cmd- build the message queue for transfer, update the PI to FW
- * to tell the fw to get this message from IOMB.
+ * pm8001_mpi_build_cmd - build the message queue for transfer, update the PI
+ * to tell the FW to get this message from the IOMB.
  * @pm8001_ha: our hba card information
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
  */
-static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
                         struct inbound_queue_table *circularQ,
-                        u32 opCode, void *payload)
+                        u32 opCode, void *payload, u32 responseQueue)
 {
        u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
-       u32 responseQueue = 0;
        void *pMessage;
 
-       if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+       if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+               &pMessage) < 0) {
                PM8001_IO_DBG(pm8001_ha,
                        pm8001_printk("No free mpi buffer\n"));
                return -1;
        }
        BUG_ON(!payload);
        /*Copy to the payload*/
-       memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+       memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+                               sizeof(struct mpi_msg_hdr)));
 
        /*Build the header*/
        Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
        pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
                circularQ->pi_offset, circularQ->producer_idx);
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
-               circularQ->consumer_index));
+               pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+                       responseQueue, opCode, circularQ->producer_idx,
+                       circularQ->consumer_index));
        return 0;
 }
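The header word assembled above packs the message metadata into 32 bits; only the valid bit, priority and buffer count are visible in this hunk, so the shifts used for response queue, category and opcode in the sketch below are assumptions for illustration, not the confirmed firmware layout:

	static inline u32 iomb_header_sketch(u32 hpriority, u32 bc, u32 responseQueue,
					     u32 category, u32 opCode)
	{
		return (1U << 31)			/* message valid */
			| (hpriority << 30)		/* high-priority queue select */
			| ((bc & 0x1f) << 24)		/* buffer (element) count */
			| ((responseQueue & 0x3f) << 16)	/* assumed position */
			| ((category & 0xf) << 12)		/* assumed position */
			| (opCode & 0xfff);			/* assumed position */
	}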
 
-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
                            struct outbound_queue_table *circularQ, u8 bc)
 {
        u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 
        msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
        pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
        if (pOutBoundMsgHeader != msgHeader) {
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 }
 
 /**
- * mpi_msg_consume- get the MPI message from  outbound queue message table.
+ * pm8001_mpi_msg_consume - get the MPI message from the outbound queue
+ * message table.
  * @pm8001_ha: our hba card information
  * @circularQ: the outbound queue  table.
  * @messagePtr1: the message contents of this outbound message.
  * @pBC: the message size.
  */
-static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                           struct outbound_queue_table *circularQ,
                           void **messagePtr1, u8 *pBC)
 {
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                        /*Get the pointer to the circular queue buffer element*/
                        msgHeader = (struct mpi_msg_hdr *)
                                (circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
                        /* read header */
                        header_tmp = pm8001_read_32(msgHeader);
                        msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
        return MPI_IO_STATUS_BUSY;
 }
 
-static void pm8001_work_fn(struct work_struct *work)
+void pm8001_work_fn(struct work_struct *work)
 {
        struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
        struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
        pm8001_dev = pw->data; /* Most stash device structure */
        if ((pm8001_dev == NULL)
         || ((pw->handler != IO_XFER_ERROR_BREAK)
-         && (pm8001_dev->dev_type == NO_DEVICE))) {
+         && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
                kfree(pw);
                return;
        }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
        }       break;
        case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
                dev = pm8001_dev->sas_device;
-               pm8001_I_T_nexus_reset(dev);
+               pm8001_I_T_nexus_event_handler(dev);
                break;
        case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
                dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
        kfree(pw);
 }
 
-static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
                               int handler)
 {
        struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
        return ret;
 }
 
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res)
+               return;
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               return;
+       }
+
+       /* allocate domain device by ourselves as libsas
+        * is not going to provide any
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
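Taken together, the two helpers above form the SATA NCQ error recovery path: mpi_sata_event() spots IO_XFER_ERROR_ABORTED_NCQ_MODE, looks the device up by ID and calls pm8001_send_read_log(); that helper issues a READ LOG EXT through an internally allocated slow task and tags the device with NCQ_READ_LOG_FLAG, and the completion of that command in mpi_sata_completion() triggers pm8001_send_abort_all(). The FIS built above is annotated below; the field meanings are standard ATA host-to-device register FIS values, not something stated in this patch:

	/* Host-to-device register FIS issued by pm8001_send_read_log() above. */
	struct host_to_dev_fis fis = {
		.fis_type	= 0x27,			/* register FIS, host to device */
		.flags		= 0x80,			/* C bit set: carries a command */
		.command	= ATA_CMD_READ_LOG_EXT,
		.lbal		= 0x10,			/* NCQ command error log page */
		.sector_count	= 0x1,			/* read a single log page */
	};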
 /**
  * mpi_ssp_completion- process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
                break;
        }
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("scsi_status = %x \n ",
+               pm8001_printk("scsi_status = %x\n ",
                psspPayload->ssp_resp_iu.status));
        spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        status = le32_to_cpu(psataPayload->status);
        tag = le32_to_cpu(psataPayload->tag);
 
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
        ccb = &pm8001_ha->ccb_info[tag];
        param = le32_to_cpu(psataPayload->param);
-       t = ccb->task;
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
        ts = &t->task_status;
-       pm8001_dev = ccb->device;
-       if (status)
+       if (!ts) {
                PM8001_FAIL_DBG(pm8001_ha,
-                       pm8001_printk("sata IO status 0x%x\n", status));
-       if (unlikely(!t || !t->lldd_task || !t->dev))
+                       pm8001_printk("ts null\n"));
                return;
+       }
 
        switch (status) {
        case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (param == 0) {
                        ts->resp = SAS_TASK_COMPLETE;
                        ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
                } else {
                        u8 len;
                        ts->resp = SAS_TASK_COMPLETE;
@@ -2423,6 +2655,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        u32 dev_id = le32_to_cpu(psataPayload->device_id);
        unsigned long flags;
 
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB !!!. returning\n"));
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm8001_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
        pm8001_dev = ccb->device;
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("port_id = %x,device_id = %x\n",
-               port_id, dev_id));
+       PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+               "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+               port_id, dev_id, tag, event));
        switch (event) {
        case IO_OVERFLOW:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static void
-mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        struct set_dev_state_resp *pPayload =
                (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct get_nvm_data_resp *pPayload =
                (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct fw_control_ex    *fw_control_context;
        struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct local_phy_ctl_resp *pPayload =
                (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * while receive a broadcast(change) primitive just tell the sas
  * layer to discover the changed domain rather than the whole domain.
  */
-static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[i];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 }
 
 /* Get the link rate speed  */
-static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 {
        struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
        u8 *sas_addr)
 {
        if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
                ((phyId & 0x0F) << 4) | (port_id & 0x0F));
        payload.param0 = cpu_to_le32(param0);
        payload.param1 = cpu_to_le32(param1);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
                        PHY_NOTIFY_ENABLE_SPINUP);
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_EDGE_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_FANOUT_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("fanout expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        default:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                " phy id = %d\n", port_id, phy_id));
        port->port_state =  portstate;
        port->port_attached = 1;
-       get_lrate_mode(phy, link_rate);
+       pm8001_get_lrate_mode(phy, link_rate);
        phy->phy_type |= PORT_TYPE_SATA;
        phy->phy_attached = 1;
        phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                sizeof(struct dev_to_host_fis));
        phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
        phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
-       phy->identify.device_type = SATA_DEV;
+       phy->identify.device_type = SAS_SATA_DEV;
        pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
        pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_reg_resp -process register device ID response.
+ * pm8001_mpi_reg_resp - process register device ID response.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * has assigned, from now,inter-communication with FW is no longer using the
  * SAS address, use device ID which FW assigned.
  */
-static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+/**
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        u32 status;
        struct fw_control_ex    fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
                break;
        }
        ccb->fw_control_context->fw_control->retcode = status;
-       pci_free_consistent(pm8001_ha->pdev,
-                       fw_control_context.len,
-                       fw_control_context.virtAddr,
-                       fw_control_context.phys_addr);
        complete(pm8001_ha->nvmd_completion);
        ccb->task = NULL;
        ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        return 0;
 }
 
-static int
-mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct sas_task *t;
        struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 status ;
        u32 tag, scp;
        struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
 
        struct task_abort_resp *pPayload =
                (struct task_abort_resp *)(piomb + 4);
 
        status = le32_to_cpu(pPayload->status);
        tag = le32_to_cpu(pPayload->tag);
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TAG NULL. RETURNING !!!"));
+               return -1;
+       }
+
        scp = le32_to_cpu(pPayload->scp);
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk(" status = 0x%x\n", status));
-       if (t == NULL)
+       pm8001_dev = ccb->device; /* retrieve device */
+
+       if (!t) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TASK NULL. RETURNING !!!"));
                return -1;
+       }
        ts = &t->task_status;
        if (status != 0)
                PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        spin_unlock_irqrestore(&t->task_state_lock, flags);
        pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        mb();
-       t->task_done(t);
+
+       if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
+               pm8001_tag_free(pm8001_ha, tag);
+               sas_free_task(t);
+               /* clear the flag */
+               pm8001_dev->id &= 0xBFFFFFFF;
+       } else
+               t->task_done(t);
+
        return 0;
 }
 
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_LOCAL_PHY_CNTRL:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
-               mpi_local_phy_ctl(pm8001_ha, piomb);
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEV_REGIST:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_DEV_REGIST\n"));
-               mpi_reg_resp(pm8001_ha, piomb);
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEREG_DEV:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("unregister the device\n"));
-               mpi_dereg_resp(pm8001_ha, piomb);
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEV_HANDLE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_FW_FLASH_UPDATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
-               mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GPIO_RESPONSE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_GENERAL_EVENT:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
-               mpi_general_event(pm8001_ha, piomb);
+               pm8001_mpi_general_event(pm8001_ha, piomb);
                break;
        case OPC_OUB_SSP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SATA_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SAS_DIAG_MODE_START_END:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SMP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
-               mpi_get_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
-               mpi_set_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEVICE_HANDLE_REMOVAL:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
-               mpi_set_dev_state_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static int process_oq(struct pm8001_hba_info *pm8001_ha)
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
        struct outbound_queue_table *circularQ;
        void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
        unsigned long flags;
 
        spin_lock_irqsave(&pm8001_ha->lock, flags);
-       circularQ = &pm8001_ha->outbnd_q_tbl[0];
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
        do {
-               ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
                if (MPI_IO_STATUS_SUCCESS == ret) {
                        /* process the outbound message */
                        process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
                        /* free the message from the outbound circular buffer */
-                       mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
                }
                if (MPI_IO_STATUS_BUSY == ret) {
                        /* Update the producer index from SPC */
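process_oq() now takes the MSI-X vector number and drains the matching outbound queue instead of always queue 0. The following is a simplified, self-contained sketch of that per-vector consume loop; the structure and names are illustrative stand-ins, not the driver's MPI types.

#include <stdint.h>

#define NUM_VEC   64
#define QUEUE_LEN 256

struct out_queue {
	uint32_t ci;                /* consumer index, advanced by the driver */
	uint32_t pi;                /* producer index, advanced by firmware   */
	void    *msg[QUEUE_LEN];
};

static struct out_queue outbnd_q_tbl[NUM_VEC];

static void process_one_iomb_sketch(void *iomb) { (void)iomb; /* decode + dispatch */ }

/* Drain only the queue bound to the vector that fired. */
static void process_oq_sketch(uint8_t vec)
{
	struct out_queue *q = &outbnd_q_tbl[vec];

	while (q->ci != q->pi) {
		process_one_iomb_sketch(q->msg[q->ci % QUEUE_LEN]);
		q->ci++;            /* hand the slot back to the firmware */
	}
}

int main(void)
{
	int dummy;

	outbnd_q_tbl[3].msg[0] = &dummy;   /* firmware "posts" one message ...   */
	outbnd_q_tbl[3].pi = 1;            /* ... and bumps the producer index   */
	process_oq_sketch(3);              /* vector 3 fired: drain queue 3 only */
	return 0;
}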
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
        [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
        [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
 };
-static void
+void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 {
        int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        smp_cmd.long_smp_req.long_resp_size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
        return 0;
 
 err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
                ssp_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
        return ret;
 }
 
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        u32 ATAP = 0x0;
        u32 dir;
        struct inbound_queue_table *circularQ;
+       unsigned long flags;
        u32  opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
                }
        }
-       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
                ncg_tag = hdr_tag;
+       }
        dir = data_dir_flags[task->data_dir] << 8;
        sata_cmd.tag = cpu_to_le32(tag);
        sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                sata_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+
+       /* Check for read log for failed drive and return */
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
        return ret;
 }
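Two things in the hunk above are worth calling out: the NCQ tag is packed into bits 7:3 of the FIS count field (the `hdr_tag << 3`), and a READ LOG EXT (0x2f) issued while internal error-recovery flags are set is completed immediately instead of being sent to the firmware. Here is a small illustration of the tag packing only; the function and field names are placeholders, not the driver's FIS struct.

#include <stdint.h>
#include <assert.h>

/* NCQ: READ/WRITE FPDMA QUEUED carry the 5-bit tag in count[7:3]. */
static uint8_t pack_ncq_tag(uint8_t count, uint8_t tag)
{
	assert(tag < 32);                 /* NCQ queue depth is at most 32 */
	return (uint8_t)(count | (tag << 3));
}

static uint8_t unpack_ncq_tag(uint8_t count)
{
	return (count >> 3) & 0x1f;
}

int main(void)
{
	uint8_t count = pack_ncq_tag(0, 17);

	return unpack_ncq_tag(count) == 17 ? 0 : 1;
}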
 
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
                LINKMODE_AUTO | LINKRATE_15 |
                LINKRATE_30 | LINKRATE_60 | phy_id);
-       payload.sas_identify.dev_type = SAS_END_DEV;
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
                pm8001_ha->sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        u8 phy_id)
 {
        struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        memset(&payload, 0, sizeof(payload));
        payload.tag = cpu_to_le32(tag);
        payload.phy_id = cpu_to_le32(phy_id);
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        if (flag == 1)
                stp_sspsmp_sata = 0x02; /*direct attached sata */
        else {
-               if (pm8001_dev->dev_type == SATA_DEV)
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
                        stp_sspsmp_sata = 0x00; /* stp*/
-               else if (pm8001_dev->dev_type == SAS_END_DEV ||
-                       pm8001_dev->dev_type == EDGE_DEV ||
-                       pm8001_dev->dev_type == FANOUT_DEV)
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        stp_sspsmp_sata = 0x01; /*ssp or smp*/
        }
        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
        memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
                SAS_ADDR_SIZE);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
-static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        u32 device_id)
 {
        struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        payload.device_id = cpu_to_le32(device_id);
        PM8001_MSG_DBG(pm8001_ha,
                pm8001_printk("unregister device device_id = %d\n", device_id));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(1);
        payload.phyop_phyid =
                cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
  * @stat: stat.
  */
 static irqreturn_t
-pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-       pm8001_chip_interrupt_disable(pm8001_ha);
-       process_oq(pm8001_ha);
-       pm8001_chip_interrupt_enable(pm8001_ha);
+       pm8001_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm8001_chip_interrupt_enable(pm8001_ha, vec);
        return IRQ_HANDLED;
 }
 
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
                task_abort.device_id = cpu_to_le32(dev_id);
                task_abort.tag = cpu_to_le32(cmd_tag);
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
        return ret;
 }
 
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
  * @task: the task we wanted to aborted.
  * @flag: the abort flag.
  */
-static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
 {
        u32 opc, device_id;
        int rc = TMF_RESP_FUNC_FAILED;
-       PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
-               " = %x", cmd_tag, task_tag));
-       if (pm8001_dev->dev_type == SAS_END_DEV)
+       PM8001_EH_DBG(pm8001_ha,
+               pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+                       cmd_tag, task_tag));
+       if (pm8001_dev->dev_type == SAS_END_DEVICE)
                opc = OPC_INB_SSP_ABORT;
-       else if (pm8001_dev->dev_type == SATA_DEV)
+       else if (pm8001_dev->dev_type == SAS_SATA_DEV)
                opc = OPC_INB_SATA_ABORT;
        else
                opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
  * @ccb: the ccb information.
  * @tmf: task management function.
  */
-static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
 {
        struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
        sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
        return ret;
 }
 
-static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+       fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
        fw_control_context->len = ioctl_payload->length;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
-static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
                return -ENOMEM;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
-               ioctl_payload->func_specific,
+               &ioctl_payload->func_specific,
                ioctl_payload->length);
        memset(&nvmd_req, 0, sizeof(nvmd_req));
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information.
  * @fw_flash_updata_info: firmware flash update param
  */
-static int
+int
 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
        void *fw_flash_updata_info, u32 tag)
 {
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
        payload.sgl_addr_hi =
                cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
-static int
+int
 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        int rc;
        u32 tag;
        struct pm8001_ccb_info *ccb;
-       void *buffer = NULL;
-       dma_addr_t phys_addr;
-       u32 phys_addr_hi;
-       u32 phys_addr_lo;
+       void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+       dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
        struct pm8001_ioctl_payload *ioctl_payload = payload;
 
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
-       if (fw_control->len != 0) {
-               if (pm8001_mem_alloc(pm8001_ha->pdev,
-                       (void **)&buffer,
-                       &phys_addr,
-                       &phys_addr_hi,
-                       &phys_addr_lo,
-                       fw_control->len, 0) != 0) {
-                               PM8001_FAIL_DBG(pm8001_ha,
-                                       pm8001_printk("Mem alloc failure\n"));
-                               kfree(fw_control_context);
-                               return -ENOMEM;
-               }
-       }
+       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
        memcpy(buffer, fw_control->buffer, fw_control->len);
        flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
        flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        flash_update_info.total_image_len = fw_control->size;
        fw_control_context->fw_control = fw_control;
        fw_control_context->virtAddr = buffer;
+       fw_control_context->phys_addr = phys_addr;
        fw_control_context->len = fw_control->len;
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
        if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        return rc;
 }
 
-static int
+int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 state)
 {
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(tag);
        payload.device_id = cpu_to_le32(pm8001_dev->device_id);
        payload.nds = cpu_to_le32(state);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
        payload.SSAHOLT = cpu_to_le32(0xd << 25);
        payload.sata_hol_tmo = cpu_to_le32(80);
        payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
        .set_dev_state_req      = pm8001_chip_set_dev_state_req,
        .sas_re_init_req        = pm8001_chip_sas_re_initialization,
 };
-
index d437309cb1e1c30de4f75aaafc7fb8dd0884a724..d7c1e2034226ea7667201a2a1474400f52994b5d 100644 (file)
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x04 << 8)
 
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE                    0x4F0000
 struct mpi_msg_hdr{
        __le32  header; /* Bits [11:0]  - Message operation code */
        /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
 
 
 #define OP_BITS 0x0000FF00
-#define ID_BITS 0x0000000F
+#define ID_BITS 0x000000FF
 
 /*
  * brief the data structure of PORT Control Command
index 3d5e522e00fc8ae93a4c9344731f960fa42c0006..e4b9bc7f5410fa5e38a9df530cba183652f1962a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 static struct scsi_transport_template *pm8001_stt;
 
+/**
+ * chip info structure identifying key chip capabilities:
+ * encryption available or not, number of ports, hw specific function ref
+ */
 static const struct pm8001_chip_info pm8001_chips[] = {
-       [chip_8001] = {  8, &pm8001_8001_dispatch,},
+       [chip_8001] = {0,  8, &pm8001_8001_dispatch,},
+       [chip_8008] = {0,  8, &pm8001_80xx_dispatch,},
+       [chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
+       [chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
+       [chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 }
 
 #ifdef PM8001_USE_TASKLET
+
+/**
+ * tasklet for 64 msi-x interrupt handler
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
 static void pm8001_tasklet(unsigned long opaque)
 {
        struct pm8001_hba_info *pm8001_ha;
+       u32 vec;
        pm8001_ha = (struct pm8001_hba_info *)opaque;
        if (unlikely(!pm8001_ha))
                BUG_ON(1);
-       PM8001_CHIP_DISP->isr(pm8001_ha);
+       vec = pm8001_ha->int_vector;
+       PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+}
+#endif
+
+static struct  pm8001_hba_info *outq_to_hba(u8 *outq)
+{
+       return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
 }
+
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and calls the equivalent bottom
+ * half or services the interrupt directly.
+ * @opaque: the passed outbound queue/vector. Host structure is
+ * retrieved from the same.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+       struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
+       u8 outq = *(u8 *)opaque;
+       irqreturn_t ret = IRQ_HANDLED;
+       if (unlikely(!pm8001_ha))
+               return IRQ_NONE;
+       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+               return IRQ_NONE;
+       pm8001_ha->int_vector = outq;
+#ifdef PM8001_USE_TASKLET
+       tasklet_schedule(&pm8001_ha->tasklet);
+#else
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
 #endif
+       return ret;
+}
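outq_to_hba() above works because outq[i] is initialised to i, so `outq - *outq` steps back to &outq[0] and container_of() can then recover the enclosing pm8001_hba_info. A standalone sketch of the same trick, with container_of spelled out via offsetof (the struct layout is a stand-in):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hba_sketch {
	int id;
	uint8_t outq[64];                  /* outq[i] == i, one slot per vector */
};

/* Recover the hba from a pointer to one of its outq[] slots. */
static struct hba_sketch *outq_to_hba_sketch(uint8_t *slot)
{
	uint8_t *base = slot - *slot;      /* works because outq[i] holds i */
	return (struct hba_sketch *)((char *)base -
				     offsetof(struct hba_sketch, outq));
}

int main(void)
{
	static struct hba_sketch hba = { .id = 42 };
	int i;

	for (i = 0; i < 64; i++)
		hba.outq[i] = (uint8_t)i;
	printf("%d\n", outq_to_hba_sketch(&hba.outq[13])->id);   /* prints 42 */
	return 0;
}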
 
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from the sas_ha structure.
+ */
 
- /**
-  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
-  * dispatcher to handle each case.
-  * @irq: irq number.
-  * @opaque: the passed general host adapter struct
-  */
-static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
        struct pm8001_hba_info *pm8001_ha;
        irqreturn_t ret = IRQ_HANDLED;
-       struct sas_ha_struct *sha = opaque;
+       struct sas_ha_struct *sha = dev_id;
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
        if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
                return IRQ_NONE;
+
+       pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet);
 #else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
        return ret;
 }
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
  * @pm8001_ha:our hba structure.
  *
  */
-static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+                       const struct pci_device_id *ent)
 {
        int i;
        spin_lock_init(&pm8001_ha->lock);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("pm8001_alloc: PHY:%x\n",
+                               pm8001_ha->chip->n_phy));
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_phy_init(pm8001_ha, i);
                pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].alignment = 32;
 
-       /* MPI Memory region 3 for consumer Index of inbound queues */
-       pm8001_ha->memoryMap.region[CI].num_elements = 1;
-       pm8001_ha->memoryMap.region[CI].element_size = 4;
-       pm8001_ha->memoryMap.region[CI].total_len = 4;
-       pm8001_ha->memoryMap.region[CI].alignment = 4;
-
-       /* MPI Memory region 4 for producer Index of outbound queues */
-       pm8001_ha->memoryMap.region[PI].num_elements = 1;
-       pm8001_ha->memoryMap.region[PI].element_size = 4;
-       pm8001_ha->memoryMap.region[PI].total_len = 4;
-       pm8001_ha->memoryMap.region[PI].alignment = 4;
-
-       /* MPI Memory region 5 inbound queues */
-       pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[IB].element_size = 64;
-       pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[IB].alignment = 64;
-
-       /* MPI Memory region 6 outbound queues */
-       pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[OB].element_size = 64;
-       pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[OB].alignment = 64;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               /* MPI Memory region 3 for consumer Index of inbound queues */
+               pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+
+               if ((ent->driver_data) != chip_8001) {
+                       /* MPI Memory region 5 inbound queues */
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+               } else {
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+               }
+       }
+
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               /* MPI Memory region 4 for producer Index of outbound queues */
+               pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+
+               if (ent->driver_data != chip_8001) {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+               } else {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+               }
 
+       }
        /* Memory region write DMA*/
        pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
        pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
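The queue-region loops above size every inbound/outbound ring from the IOMB width, which doubles from 64 to 128 bytes on the new controllers. A tiny sketch of that sizing rule follows; the queue-depth constant is an assumption mirroring PM8001_MPI_QUEUE, not a value taken from this diff.

#include <stdio.h>

#define PM8001_MPI_QUEUE 1024   /* assumed: elements per MPI queue            */
#define IOMB_SIZE_SPC      64   /* legacy SPC IOMB width                      */
#define IOMB_SIZE_SPCV    128   /* width used for the 8008/8009/8018/8019     */

static unsigned int queue_region_len(int is_spcv)
{
	unsigned int iomb = is_spcv ? IOMB_SIZE_SPCV : IOMB_SIZE_SPC;

	return PM8001_MPI_QUEUE * iomb;   /* total_len; alignment == iomb */
}

int main(void)
{
	printf("SPC  queue region: %u bytes\n", queue_region_len(0));
	printf("SPCv queue region: %u bytes\n", queue_region_len(1));
	return 0;
}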
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);
 
+       /* Memory region for fw flash */
+       pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 
        pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_DEVICES; i++) {
-               pm8001_ha->devices[i].dev_type = NO_DEVICE;
+               pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
                pm8001_ha->devices[i].id = i;
                pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
                pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
                                ioremap(pm8001_ha->io_mem[logicalBar].membase,
                                pm8001_ha->io_mem[logicalBar].memsize);
                        PM8001_INIT_DBG(pm8001_ha,
-                               pm8001_printk("PCI: bar %d, logicalBar %d "
-                               "virt_addr=%lx,len=%d\n", bar, logicalBar,
-                               (unsigned long)
-                               pm8001_ha->io_mem[logicalBar].memvirtaddr,
+                               pm8001_printk("PCI: bar %d, logicalBar %d ",
+                               bar, logicalBar));
+                       PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                               "base addr %llx virt_addr=%llx len=%d\n",
+                               (u64)pm8001_ha->io_mem[logicalBar].membase,
+                               (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
                                pm8001_ha->io_mem[logicalBar].memsize));
                } else {
                        pm8001_ha->io_mem[logicalBar].membase   = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
  * @shost: scsi host struct which has been initialized before.
  */
 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
-                                               u32 chip_id,
-                                               struct Scsi_Host *shost)
+                                const struct pci_device_id *ent,
+                               struct Scsi_Host *shost)
+
 {
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 
        pm8001_ha->pdev = pdev;
        pm8001_ha->dev = &pdev->dev;
-       pm8001_ha->chip_id = chip_id;
+       pm8001_ha->chip_id = ent->driver_data;
        pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
        pm8001_ha->irq = pdev->irq;
        pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
        pm8001_ha->id = pm8001_id++;
        pm8001_ha->logging_level = 0x01;
        sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+       /* IOMB size is 128 for 8088/89 controllers */
+       if (pm8001_ha->chip_id != chip_8001)
+               pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+       else
+               pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
 #ifdef PM8001_USE_TASKLET
+       /**
+       * default tasklet for non msi-x interrupt handler/first msi-x
+       * interrupt handler
+       **/
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-               (unsigned long)pm8001_ha);
+                       (unsigned long)pm8001_ha);
 #endif
        pm8001_ioremap(pm8001_ha);
-       if (!pm8001_alloc(pm8001_ha))
+       if (!pm8001_alloc(pm8001_ha, ent))
                return pm8001_ha;
        pm8001_free(pm8001_ha);
        return NULL;
@@ -512,21 +605,50 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-       u8 i;
+       u8 i, j;
 #ifdef PM8001_READ_VPD
+       /* For new SPC controllers WWN is stored in flash vpd
+       *  For SPC/SPCve controllers WWN is stored in EEPROM
+       *  For Older SPC WWN is stored in NVMD
+       */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
        pm8001_ha->nvmd_completion = &completion;
-       payload.minor_function = 0;
-       payload.length = 128;
-       payload.func_specific = kzalloc(128, GFP_KERNEL);
+
+       if (pm8001_ha->chip_id == chip_8001) {
+               if (deviceid == 0x8081) {
+                       payload.minor_function = 4;
+                       payload.length = 4096;
+               } else {
+                       payload.minor_function = 0;
+                       payload.length = 128;
+               }
+       } else {
+               payload.minor_function = 1;
+               payload.length = 4096;
+       }
+       payload.offset = 0;
+       payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
+
+       for (i = 0, j = 0; i <= 7; i++, j++) {
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (deviceid == 0x8081)
+                               pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x704 + i];
+               } else
+                       pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x804 + i];
+       }
+
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-               memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
-                       SAS_ADDR_SIZE);
+               memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+                       pm8001_ha->sas_addr, SAS_ADDR_SIZE);
                PM8001_INIT_DBG(pm8001_ha,
-                       pm8001_printk("phy %d sas_addr = %016llx \n", i,
+                       pm8001_printk("phy %d sas_addr = %016llx\n", i,
                        pm8001_ha->phy[i].dev_sas_addr));
        }
 #else
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * @chip_info: our ha struct.
  * @irq_handler: irq_handler
  */
-static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
-       irq_handler_t irq_handler)
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 {
        u32 i = 0, j = 0;
-       u32 number_of_intr = 1;
+       u32 number_of_intr;
        int flag = 0;
        u32 max_entry;
        int rc;
+       static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+       /* SPCv controllers support 64 msi-x */
+       if (pm8001_ha->chip_id == chip_8001) {
+               number_of_intr = 1;
+               flag |= IRQF_DISABLED;
+       } else {
+               number_of_intr = PM8001_MAX_MSIX_VEC;
+               flag &= ~IRQF_SHARED;
+               flag |= IRQF_DISABLED;
+       }
+
        max_entry = sizeof(pm8001_ha->msix_entries) /
                sizeof(pm8001_ha->msix_entries[0]);
-       flag |= IRQF_DISABLED;
        for (i = 0; i < max_entry ; i++)
                pm8001_ha->msix_entries[i].entry = i;
        rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
                number_of_intr);
        pm8001_ha->number_of_intr = number_of_intr;
        if (!rc) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "pci_enable_msix request ret:%d no of intr %d\n",
+                                       rc, pm8001_ha->number_of_intr));
+
+               for (i = 0; i < number_of_intr; i++)
+                       pm8001_ha->outq[i] = i;
+
                for (i = 0; i < number_of_intr; i++) {
+                       snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+                                       DRV_NAME"%d", i);
                        if (request_irq(pm8001_ha->msix_entries[i].vector,
-                               irq_handler, flag, DRV_NAME,
-                               SHOST_TO_SAS_HA(pm8001_ha->shost))) {
+                               pm8001_interrupt_handler_msix, flag,
+                               intr_drvname[i], &pm8001_ha->outq[i])) {
                                for (j = 0; j < i; j++)
                                        free_irq(
                                        pm8001_ha->msix_entries[j].vector,
-                                       SHOST_TO_SAS_HA(pm8001_ha->shost));
+                                       &pm8001_ha->outq[j]);
                                pci_disable_msix(pm8001_ha->pdev);
                                break;
                        }
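Each MSI-X vector above gets its own interrupt name and, more importantly, a distinct dev_id pointer (&pm8001_ha->outq[i]) so the shared handler can tell which outbound queue fired. A minimal user-space sketch of that per-vector cookie pattern, with a stand-in registration function instead of request_irq():

#include <stdint.h>
#include <stdio.h>

#define MAX_VEC 64

typedef void (*handler_t)(void *cookie);

static uint8_t   outq[MAX_VEC];            /* outq[i] == i                       */
static handler_t vec_handler[MAX_VEC];     /* stand-ins for the kernel's own     */
static void     *vec_cookie[MAX_VEC];      /* per-irq handler/dev_id bookkeeping */

/* Stand-in for request_irq(): remember one handler/cookie pair per vector. */
static void register_vector(int vec, handler_t h, void *cookie)
{
	vec_handler[vec] = h;
	vec_cookie[vec]  = cookie;
}

static void msix_handler(void *cookie)
{
	uint8_t vec = *(uint8_t *)cookie;  /* which outbound queue fired */

	printf("drain outbound queue %u\n", (unsigned)vec);
}

int main(void)
{
	char name[16];
	int i;

	for (i = 0; i < MAX_VEC; i++) {
		outq[i] = (uint8_t)i;
		snprintf(name, sizeof(name), "pm80xx%d", i);   /* per-vector name */
		register_vector(i, msix_handler, &outq[i]);    /* distinct cookie */
	}
	vec_handler[3](vec_cookie[3]);     /* simulate vector 3 firing */
	return 0;
}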
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
        struct pci_dev *pdev;
-       irq_handler_t irq_handler = pm8001_interrupt;
        int rc;
 
        pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-               return pm8001_setup_msix(pm8001_ha, irq_handler);
-       else
+               return pm8001_setup_msix(pm8001_ha);
+       else {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MSIX not supported!!!\n"));
                goto intx;
+       }
 #endif
 
 intx:
        /* initialize the INT-X interrupt */
-       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
-               SHOST_TO_SAS_HA(pm8001_ha->shost));
+       rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
        return rc;
 }
 
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 {
        unsigned int rc;
        u32     pci_reg;
+       u8      i = 0;
        struct pm8001_hba_info *pm8001_ha;
        struct Scsi_Host *shost = NULL;
        const struct pm8001_chip_info *chip;
 
        dev_printk(KERN_INFO, &pdev->dev,
-               "pm8001: driver version %s\n", DRV_VERSION);
+               "pm80xx: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                goto err_out_free;
        }
        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
-       pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+       /* ent->driver variable is used to differentiate between controllers */
+       pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
        if (!pm8001_ha) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "chip_init failed [ret: %d]\n", rc));
                goto err_out_ha_free;
+       }
 
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha_free;
        rc = pm8001_request_irq(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "pm8001_request_irq failed [ret: %d]\n", rc));
                goto err_out_shost;
+       }
+
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+               /* setup thermal configuration. */
+               pm80xx_set_thermal_config(pm8001_ha);
+       }
 
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        pm8001_init_sas_add(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        scsi_remove_host(pm8001_ha->shost);
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                printk(KERN_ERR " PCI PM not supported\n");
                return -ENODEV;
        }
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
+       u8 i = 0;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        if (rc)
                goto err_out_disable;
 
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       /* chip soft rst only for spc */
+       if (pm8001_ha->chip_id == chip_8001) {
+               PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("chip soft reset successful\n"));
+       }
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+
+       /* disable all the interrupt bits */
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       #ifdef PM8001_USE_TASKLET
+#ifdef PM8001_USE_TASKLET
+       /* default tasklet for non msi-x interrupt handler/first msi-x
+       * interrupt handler */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-                   (unsigned long)pm8001_ha);
-       #endif
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+                       (unsigned long)pm8001_ha);
+#endif
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+       }
        scsi_unblock_requests(pm8001_ha->shost);
        return 0;
 
@@ -843,14 +1018,45 @@ err_out_enable:
        return rc;
 }
 
+/* PCI device id, vendor id and driver data are set to a
+ * unique value for each controller
+ */
 static struct pci_device_id pm8001_pci_table[] = {
-       {
-               PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
-       },
+       { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
        {
                PCI_DEVICE(0x117c, 0x0042),
                .driver_data = chip_8001
        },
+       /* Support for SPC/SPCv/SPCve controllers */
+       { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
        {} /* terminate list */
 };
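The expanded ID table mixes PCI_VDEVICE() entries, which wildcard the subsystem IDs, with full seven-field entries that pin subvendor/subdevice so boards sharing a device ID can still select different chip_* values. A compact sketch of that match rule follows; the numeric vendor IDs and the two-entry table are illustrative assumptions, and the real matching is done by the PCI core.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ANY_ID 0xffffffffu
enum { chip_8001 = 0, chip_8008 = 1 };     /* stand-ins for the driver's enum */

struct id_entry {
	uint32_t vendor, device, subvendor, subdevice;
	unsigned long driver_data;
};

/* Simplified version of the PCI core's rule: subsystem IDs only constrain
 * the match when the table entry does not use the wildcard. */
static int id_matches(const struct id_entry *e,
		      uint32_t v, uint32_t d, uint32_t sv, uint32_t sd)
{
	return e->vendor == v && e->device == d &&
	       (e->subvendor == ANY_ID || e->subvendor == sv) &&
	       (e->subdevice == ANY_ID || e->subdevice == sd);
}

int main(void)
{
	/* Vendor IDs below are assumed for illustration only. */
	const struct id_entry tbl[] = {
		{ 0x11f8, 0x8001, ANY_ID, ANY_ID, chip_8001 }, /* like PCI_VDEVICE()  */
		{ 0x9005, 0x8088, 0x9005, 0x0008, chip_8008 }, /* pinned subsystem id */
	};
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (id_matches(&tbl[i], 0x9005, 0x8088, 0x9005, 0x0008))
			printf("matched entry %zu -> chip %lu\n", i, tbl[i].driver_data);
	return 0;
}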
 
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
 {
        int rc = -ENOMEM;
 
-       pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+       pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
        if (!pm8001_wq)
                goto err;
 
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
-MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_DESCRIPTION(
+               "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
index b961112395d5e053145ba6f83ed4bf7e5058c5c5..a85d73de7c80c730260362ae56a7688189459ee6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
        clear_bit(tag, bitmap);
 }
 
-static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
 {
        pm8001_tag_clear(pm8001_ha, tag);
 }
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                break;
        case PHY_FUNC_GET_EVENTS:
                spin_lock_irqsave(&pm8001_ha->lock, flags);
-               if (-1 == pm8001_bar4_shift(pm8001_ha,
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (-1 == pm8001_bar4_shift(pm8001_ha,
                                        (phy_id < 4) ? 0x30000 : 0x40000)) {
-                       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-                       return -EINVAL;
+                               spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+                               return -EINVAL;
+                       }
                }
                {
                        struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                        phy->loss_of_dword_sync_count = qp[3];
                        phy->phy_reset_problem_count = qp[4];
                }
-               pm8001_bar4_shift(pm8001_ha, 0);
+               if (pm8001_ha->chip_id == chip_8001)
+                       pm8001_bar4_shift(pm8001_ha, 0);
                spin_unlock_irqrestore(&pm8001_ha->lock, flags);
                return 0;
        default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        pm8001_ha = sha->lldd_ha;
-       PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+       /* SAS_RE_INITIALIZATION not available in SPCv/ve */
+       if (pm8001_ha->chip_id == chip_8001)
+               PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
        for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
                PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
 }
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
   * @tmf: the task management IU
   */
 #define DEV_IS_GONE(pm8001_dev)        \
-       ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+       ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
 static int pm8001_task_exec(struct sas_task *task, const int num,
        gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
 {
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
                struct task_status_struct *tsm = &t->task_status;
                tsm->resp = SAS_TASK_UNDELIVERED;
                tsm->stat = SAS_PHY_DOWN;
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        t->task_done(t);
                return 0;
        }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
        u32 dev;
        for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
-               if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+               if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        pm8001_ha->devices[dev].id = dev;
                        return &pm8001_ha->devices[dev];
                }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
        }
        return NULL;
 }
+/**
+  * pm8001_find_dev - find a matching pm8001_device
+  * @pm8001_ha: our hba card information
+  */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id)
+{
+       u32 dev;
+       for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+               if (pm8001_ha->devices[dev].device_id == device_id)
+                       return &pm8001_ha->devices[dev];
+       }
+       if (dev == PM8001_MAX_DEVICES) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
+                               "DEVICE FOUND !!!\n"));
+       }
+       return NULL;
+}
 
 static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
 {
        u32 id = pm8001_dev->id;
        memset(pm8001_dev, 0, sizeof(*pm8001_dev));
        pm8001_dev->id = id;
-       pm8001_dev->dev_type = NO_DEVICE;
+       pm8001_dev->dev_type = SAS_PHY_UNUSED;
        pm8001_dev->device_id = PM8001_MAX_DEVICES;
        pm8001_dev->sas_device = NULL;
 }
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
                        res = -1;
                }
        } else {
-               if (dev->dev_type == SATA_DEV) {
+               if (dev->dev_type == SAS_SATA_DEV) {
                        pm8001_device->attached_phy =
                                dev->rphy->identify.phy_identifier;
                                flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
        PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        wait_for_completion(&completion);
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                msleep(50);
        pm8001_ha->flags = PM8001F_RUN_TIME;
        return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
        return pm8001_dev_found_notify(dev);
 }
 
-static void pm8001_task_done(struct sas_task *task)
+void pm8001_task_done(struct sas_task *task)
 {
        if (!del_timer(&task->slow_task->timer))
                return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
                struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
 
                pm8001_dev = ccb->device;
-               if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+               if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
                        continue;
                if (!device_to_close) {
                        uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
        return rc;
 }
 
+/*
+ * This function handles the IT_NEXUS_XXX event or completion
+ * status code for SSP/SATA/SMP I/O requests.
+ */
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct pm8001_device *pm8001_dev;
+       struct pm8001_hba_info *pm8001_ha;
+       struct sas_phy *phy;
+       u32 device_id = 0;
+
+       if (!dev || !dev->lldd_dev)
+               return -1;
+
+       pm8001_dev = dev->lldd_dev;
+       device_id = pm8001_dev->device_id;
+       pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+       PM8001_EH_DBG(pm8001_ha,
+                       pm8001_printk("I_T_Nexus handler invoked !!"));
+
+       phy = sas_get_local_phy(dev);
+
+       if (dev_is_sata(dev)) {
+               DECLARE_COMPLETION_ONSTACK(completion_setstate);
+               if (scsi_is_sas_phy_local(phy)) {
+                       rc = 0;
+                       goto out;
+               }
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /* send phy reset to hard reset the target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+               pm8001_dev->setds_completion = &completion_setstate;
+
+               wait_for_completion(&completion_setstate);
+       } else {
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /* send phy reset to hard reset the target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+       }
+       PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+               pm8001_dev->device_id, rc));
+out:
+       sas_put_local_phy(phy);
+
+       return rc;
+}
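
    The new I_T nexus event handler applies the same recovery sequence on both
    branches: issue an internal abort so the firmware flushes outstanding
    commands, deregister the target, hard-reset the attached phy, and, for
    SATA, wait for the firmware to confirm the device-state change. The
    stand-alone sketch below only models that ordering with stubbed steps;
    every helper name in it is invented for the example and is not driver code.

    #include <stdio.h>

    /* Stubbed recovery steps; each returns 0 on success. */
    static int internal_abort(void)     { puts("abort outstanding I/O in FW"); return 0; }
    static int deregister_target(void)  { puts("deregister target device");    return 0; }
    static int hard_reset_phy(void)     { puts("hard reset attached phy");      return 0; }
    static int wait_set_dev_state(void) { puts("wait for set-device-state");    return 0; }

    /* Mirrors the handler's control flow: the SATA branch additionally
     * waits for firmware confirmation before returning. */
    static int it_nexus_recover(int is_sata)
    {
            int rc;

            internal_abort();            /* flush outstanding commands in FW */
            deregister_target();         /* forget the device registration   */
            rc = hard_reset_phy();       /* link-level hard reset            */
            if (is_sata)
                    wait_set_dev_state();/* SATA: wait for FW confirmation   */
            return rc;
    }

    int main(void)
    {
            return it_nexus_recover(1 /* SATA */);
    }
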
 /* mandatory SAM-3, the task reset the specified LUN*/
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 {
index 11008205aeb331c9b86be09deb8c26733cd133b2..570819464d90b40568477eac1e4c283ae10c3169 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -57,8 +57,8 @@
 #include <linux/atomic.h>
 #include "pm8001_defs.h"
 
-#define DRV_NAME               "pm8001"
-#define DRV_VERSION            "0.1.36"
+#define DRV_NAME               "pm80xx"
+#define DRV_VERSION            "0.1.37"
 #define PM8001_FAIL_LOGGING    0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING    0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING    0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
 #define PM8001_EH_LOGGING      0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING   0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING     0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)  printk(KERN_INFO "%s %d:" format,\
-                               __func__, __LINE__, ## arg)
+#define pm8001_printk(format, arg...)  printk(KERN_INFO "pm80xx %s %d:" \
+                       format, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)  \
 do {                                           \
        if (unlikely(HBA->logging_level & LEVEL))       \
@@ -103,11 +103,12 @@ do {                                              \
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)  ((type == EDGE_DEV) || (type == FANOUT_DEV))
+#define DEV_IS_EXPANDER(type)  ((type == SAS_EDGE_EXPANDER_DEVICE) || \
+                               (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define PM8001_NAME_LENGTH             32/* generic length of strings */
 extern struct list_head hba_list;
 extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
 
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
 struct pm8001_dispatch {
        char *name;
        int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
-       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
        int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
-       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
-       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
        int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
                struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
 };
 
 struct pm8001_chip_info {
+       u32     encrypt;
        u32     n_phy;
        const struct pm8001_dispatch    *dispatch;
 };
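
    The dispatch-table changes above thread an MSI-X vector number through the
    isr, isr_process_oq and interrupt enable/disable entry points, so one ops
    structure can serve both the single-vector SPC path and the multi-vector
    SPCv/ve path, while the new encrypt field in pm8001_chip_info lets generic
    code ask whether a chip generation supports encryption. A reduced,
    stand-alone model of the per-vector dispatch pattern (all names here are
    illustrative, not the driver's):

    #include <stdio.h>

    /* Illustrative ops table: every interrupt entry point takes a vector. */
    struct hw_ops {
            void (*interrupt_enable)(int vec);
            void (*interrupt_disable)(int vec);
            int  (*isr)(int vec);
    };

    static void spcv_int_enable(int vec)  { printf("enable vector %d\n", vec); }
    static void spcv_int_disable(int vec) { printf("disable vector %d\n", vec); }
    static int  spcv_isr(int vec)         { printf("service outbound queue %d\n", vec); return 1; }

    static const struct hw_ops spcv_ops = {
            .interrupt_enable  = spcv_int_enable,
            .interrupt_disable = spcv_int_disable,
            .isr               = spcv_isr,
    };

    int main(void)
    {
            /* A single-vector chip would simply always pass vector 0 here. */
            for (int vec = 0; vec < 4; vec++) {
                    spcv_ops.interrupt_disable(vec);
                    spcv_ops.isr(vec);
                    spcv_ops.interrupt_enable(vec);
            }
            return 0;
    }
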
@@ -204,7 +206,7 @@ struct pm8001_phy {
 };
 
 struct pm8001_device {
-       enum sas_dev_type       dev_type;
+       enum sas_device_type    dev_type;
        struct domain_device    *sas_device;
        u32                     attached_phy;
        u32                     id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
        struct mpi_mem          region[USI_MAX_MEMCNT];
 };
 
-struct main_cfg_table {
+struct encrypt {
+       u32     cipher_mode;
+       u32     sec_mode;
+       u32     status;
+       u32     flag;
+};
+
+struct sas_phy_attribute_table {
+       u32     phystart1_16[16];
+       u32     outbound_hw_event_pid1_16[16];
+};
+
+union main_cfg_table {
+       struct {
        u32                     signature;
        u32                     interface_rev;
        u32                     firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
        u32                     fatal_err_dump_length1;
        u32                     hda_mode_flag;
        u32                     anolog_setup_table_offset;
+       u32                     rsvd[4];
+       } pm8001_tbl;
+
+       struct {
+       u32                     signature;
+       u32                     interface_rev;
+       u32                     firmware_rev;
+       u32                     max_out_io;
+       u32                     max_sgl;
+       u32                     ctrl_cap_flag;
+       u32                     gst_offset;
+       u32                     inbound_queue_offset;
+       u32                     outbound_queue_offset;
+       u32                     inbound_q_nppd_hppd;
+       u32                     rsvd[8];
+       u32                     crc_core_dump;
+       u32                     rsvd1;
+       u32                     upper_event_log_addr;
+       u32                     lower_event_log_addr;
+       u32                     event_log_size;
+       u32                     event_log_severity;
+       u32                     upper_pcs_event_log_addr;
+       u32                     lower_pcs_event_log_addr;
+       u32                     pcs_event_log_size;
+       u32                     pcs_event_log_severity;
+       u32                     fatal_err_interrupt;
+       u32                     fatal_err_dump_offset0;
+       u32                     fatal_err_dump_length0;
+       u32                     fatal_err_dump_offset1;
+       u32                     fatal_err_dump_length1;
+       u32                     gpio_led_mapping;
+       u32                     analog_setup_table_offset;
+       u32                     int_vec_table_offset;
+       u32                     phy_attr_table_offset;
+       u32                     port_recovery_timer;
+       u32                     interrupt_reassertion_delay;
+       } pm80xx_tbl;
 };
-struct general_status_table {
+
+union general_status_table {
+       struct {
        u32                     gst_len_mpistate;
        u32                     iq_freeze_state0;
        u32                     iq_freeze_state1;
        u32                     msgu_tcnt;
        u32                     iop_tcnt;
-       u32                     reserved;
+       u32                     rsvd;
        u32                     phy_state[8];
-       u32                     reserved1;
-       u32                     reserved2;
-       u32                     reserved3;
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
+       u32                     recover_err_info[8];
+       } pm8001_tbl;
+       struct {
+       u32                     gst_len_mpistate;
+       u32                     iq_freeze_state0;
+       u32                     iq_freeze_state1;
+       u32                     msgu_tcnt;
+       u32                     iop_tcnt;
+       u32                     rsvd[9];
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
        u32                     recover_err_info[8];
+       } pm80xx_tbl;
 };
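
    Turning the main config and general status tables into unions lets a
    single field in pm8001_hba_info hold either the SPC layout (pm8001_tbl) or
    the larger SPCv/ve layout (pm80xx_tbl), with the chip-specific code
    touching only the member it understands. A minimal stand-alone
    illustration of that design choice, with invented placeholder fields:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented placeholder layouts; the real ones carry many more fields. */
    struct spc_cfg  { uint32_t signature, interface_rev, firmware_rev; };
    struct spcv_cfg { uint32_t signature, interface_rev, firmware_rev, max_out_io; };

    union main_cfg {
            struct spc_cfg  pm8001_tbl;   /* SPC (8001) layout     */
            struct spcv_cfg pm80xx_tbl;   /* SPCv/ve (80xx) layout */
    };

    int main(void)
    {
            union main_cfg cfg = { { 0 } };
            int is_spcv = 1;

            /* Only the member matching the detected chip generation is used. */
            if (is_spcv)
                    cfg.pm80xx_tbl.max_out_io = 1024;
            else
                    cfg.pm8001_tbl.firmware_rev = 0x101;

            printf("max_out_io=%u\n", (unsigned int)cfg.pm80xx_tbl.max_out_io);
            return 0;
    }
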
 struct inbound_queue_table {
        u32                     element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
        struct device           *dev;
        struct pm8001_hba_memspace io_mem[6];
        struct mpi_mem_req      memoryMap;
+       struct encrypt          encrypt_info; /* support encryption */
        void __iomem    *msg_unit_tbl_addr;/*Message Unit Table Addr*/
        void __iomem    *main_cfg_tbl_addr;/*Main Config Table Addr*/
        void __iomem    *general_stat_tbl_addr;/*General Status Table Addr*/
        void __iomem    *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
        void __iomem    *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
-       struct main_cfg_table   main_cfg_tbl;
-       struct general_status_table     gs_tbl;
-       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_INB_NUM];
-       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+       void __iomem    *pspa_q_tbl_addr;
+                       /*MPI SAS PHY attributes Queue Config Table Addr*/
+       void __iomem    *ivt_tbl_addr; /*MPI IVT Table Addr */
+       union main_cfg_table    main_cfg_tbl;
+       union general_status_table      gs_tbl;
+       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+       struct sas_phy_attribute_table  phy_attr_table;
+                                       /* MPI SAS PHY attributes */
        u8                      sas_addr[SAS_ADDR_SIZE];
        struct sas_ha_struct    *sas;/* SCSI/SAS glue */
        struct Scsi_Host        *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
        struct pm8001_port      port[PM8001_MAX_PHYS];
        u32                     id;
        u32                     irq;
+       u32                     iomb_size; /* SPC and SPCV IOMB size */
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
 #ifdef PM8001_USE_MSIX
-       struct msix_entry       msix_entries[16];/*for msi-x interrupt*/
+       struct msix_entry       msix_entries[PM8001_MAX_MSIX_VEC];
+                                       /*for msi-x interrupt*/
        int                     number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
 #endif
        u32                     logging_level;
        u32                     fw_status;
+       u32                     smp_exp_mode;
+       u32                     int_vector;
        const struct firmware   *fw_image;
+       u8                      outq[PM8001_MAX_MSIX_VEC];
 };
 
 struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
 #define FLASH_UPDATE_DNLD_NOT_SUPPORTED                0x10
 #define FLASH_UPDATE_DISABLED                  0x11
 
+#define        NCQ_READ_LOG_FLAG                       0x80000000
+#define        NCQ_ABORT_ALL_FLAG                      0x40000000
+#define        NCQ_2ND_RLE_FLAG                        0x20000000
 /**
  * brief param structure for firmware flash update.
  */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
 void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
 void pm8001_open_reject_retry(
        struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
        u32 mem_size, u32 align);
 
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+                       struct inbound_queue_table *circularQ,
+                       u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+                               u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+                       struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+                       struct outbound_queue_table *circularQ,
+                       void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+                       struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+                                       void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+                                       void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_ccb_info *ccb,
+                               struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_device *pm8001_dev,
+                               u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+                                       void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644 (file)
index 0000000..302514d
--- /dev/null
@@ -0,0 +1,4130 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+#include "pm80xx_hwi.h"
+#include "pm8001_chips.h"
+#include "pm8001_ctl.h"
+
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+/**
+ * read_main_config_table - read the configuration table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature    =
+               pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+               pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
+               pm8001_mr32(address, MAIN_FW_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io   =
+               pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl      =
+               pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+               pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset   =
+               pm8001_mr32(address, MAIN_GST_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+               pm8001_mr32(address, MAIN_IBQ_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+               pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+       /* read Error Dump Offset and Length */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+       /* read GPIO LED settings from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+               pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+       /* read analog Setting offset from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+               pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+               pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+               pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+       pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate   =
+                       pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt          =
+                       pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt           =
+                       pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val     =
+                       pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+                        pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+       pm8001_ha->phy_attr_table.phystart1_16[0] =
+                       pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[1] =
+                       pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[2] =
+                       pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[3] =
+                       pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[4] =
+                       pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[5] =
+                       pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[6] =
+                       pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[7] =
+                       pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[8] =
+                       pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[9] =
+                       pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[10] =
+                       pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[11] =
+                       pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[12] =
+                       pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[13] =
+                       pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[14] =
+                       pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[15] =
+                       pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+
+}
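
    read_phy_attr_table unrolls sixteen near-identical register reads per
    array. If the PSPA_PHYSTATE and PSPA_OB_HW_EVENT_PID offsets are
    consecutive 32-bit words (an assumption made by this sketch, not something
    the patch states), the same work can be expressed as a loop. The example
    below stands an ordinary array in for the MMIO window:

    #include <stdio.h>
    #include <stdint.h>

    #define NR_PHYS 16

    /* Stand-in for pm8001_mr32(): read a 32-bit word at a byte offset. */
    static uint32_t mr32(const uint32_t *base, unsigned int byte_off)
    {
            return base[byte_off / 4];
    }

    int main(void)
    {
            uint32_t mmio[2 * NR_PHYS];      /* fake PSPA table window */
            uint32_t phystart[NR_PHYS];

            for (unsigned int i = 0; i < 2 * NR_PHYS; i++)
                    mmio[i] = i;

            /* Assumed layout: one 32-bit phy-state word per phy, 4 bytes apart. */
            for (unsigned int i = 0; i < NR_PHYS; i++)
                    phystart[i] = mr32(mmio, i * 4);

            printf("phy0=%u phy15=%u\n", (unsigned int)phystart[0],
                   (unsigned int)phystart[15]);
            return 0;
    }
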
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               u32 offset = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + IB_PIPCI_BAR)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset =
+                       pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               u32 offset = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + OB_CIPCI_BAR)));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset =
+                       pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       u32 offsetib, offsetob;
+       void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+       void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size               =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity           = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size           =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
+
+       /* Disable end to end CRC checking */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+               pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].base_virt             =
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+               pm8001_ha->inbnd_q_tbl[i].total_length          =
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
+               pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].ci_virt               =
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+               offsetib = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
+                       get_pci_bar_index(pm8001_mr32(addressib,
+                               (offsetib + 0x14)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset             =
+                       pm8001_mr32(addressib, (offsetib + 0x18));
+               pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
+               pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
+       }
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+               pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+               pm8001_ha->outbnd_q_tbl[i].base_virt            =
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+               pm8001_ha->outbnd_q_tbl[i].total_length         =
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
+               pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+               /* interrupt vector based on oq */
+               pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+               pm8001_ha->outbnd_q_tbl[i].pi_virt              =
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+               offsetob = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
+                       get_pci_bar_index(pm8001_mr32(addressob,
+                       offsetob + 0x14));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset            =
+                       pm8001_mr32(addressob, (offsetob + 0x18));
+               pm8001_ha->outbnd_q_tbl[i].consumer_idx         = 0;
+               pm8001_ha->outbnd_q_tbl[i].producer_index       = 0;
+       }
+}
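
    init_default_table_values packs each queue's property word inline: the
    queue depth in the low bits, the element size at bit 16, and a direction
    flag at bit 30 (0x00 for inbound, 0x01 for outbound), as implied by the
    shifts used above. A stand-alone sketch of that packing; the exact field
    widths are inferred from the code, not from a specification:

    #include <stdio.h>
    #include <stdint.h>

    /* Depth in the low bits, element size at bit 16, direction at bit 30. */
    static uint32_t queue_property(uint32_t depth, uint32_t elem_size, int outbound)
    {
            return depth | (elem_size << 16) | ((outbound ? 1u : 0u) << 30);
    }

    int main(void)
    {
            unsigned int inbound_prop  = queue_property(256, 64, 0);
            unsigned int outbound_prop = queue_property(256, 64, 1);

            printf("inbound=0x%08x outbound=0x%08x\n",
                   inbound_prop, outbound_prop);
            return 0;
    }
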
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+       pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+       pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+       pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+       pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+       /* SPCv specific */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+       /* Set GPIOLED to 0x2 for LED indicator */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+       pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+       pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+       pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                        int number)
+{
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       u16 offset = number * 0x20;
+       pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                                int number)
+{
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       u16 offset = number * 0x24;
+       pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+       pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+
+       /*
+        * Write bit0=1 to the Inbound DoorBell Register to tell the FW
+        * that the table is updated.
+        */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count)
+               return -1;
+       /* check the MPI-State for initialization up to 100 ms */
+       max_wait_count = 100 * 1000;/* 100 msec */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                                       GST_GSTLEN_MPIS_OFFSET);
+       } while ((GST_MPI_STATE_INIT !=
+               (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+       if (!max_wait_count)
+               return -1;
+
+       /* check MPI Initialization error */
+       gst_len_mpistate = gst_len_mpistate >> 16;
+       if (0x0000 != gst_len_mpistate)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * check_fw_ready - the LLDD checks if the FW is ready; if not, return an error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+       u32 max_wait_count;
+       u32 max_wait_time;
+       int ret = 0;
+
+       /* reset / PCIe ready */
+       max_wait_time = max_wait_count = 100 * 1000;    /* 100 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+       /* check ila status */
+       max_wait_time = max_wait_count = 1000 * 1000;   /* 1000 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_ILA_READY) !=
+                       SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" ila ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check RAAE status */
+       max_wait_time = max_wait_count = 1800 * 1000;   /* 1800 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_RAAE_READY) !=
+                               SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" raae ready status in %d millisec\n",
+                                       (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop0 status */
+       max_wait_time = max_wait_count = 600 * 1000;    /* 600 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+                       (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" iop0 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop1 status only for 16 port controllers */
+       if ((pm8001_ha->chip_id != chip_8008) &&
+                       (pm8001_ha->chip_id != chip_8009)) {
+               /* 200 milli sec */
+               max_wait_time = max_wait_count = 200 * 1000;
+               do {
+                       udelay(1);
+                       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+               } while (((value & SCRATCH_PAD_IOP1_READY) !=
+                               SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+               if (!max_wait_count)
+                       ret = -1;
+               else {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "iop1 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+               }
+       }
+
+       return ret;
+}
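
    check_fw_ready and mpi_init_check share one busy-wait idiom: read a
    scratch-pad or doorbell register, test the bits of interest, udelay(1),
    and give up when a per-stage budget runs out. A generic, self-contained
    version of that loop follows; the register source is simulated and the
    helper name is invented:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_reg;                       /* simulated MMIO source */
    static uint32_t read_reg(void) { return ++fake_reg; }

    /* Poll until (reg & mask) == want or the budget runs out.
     * Returns 0 on success, -1 on timeout. */
    static int poll_bits(uint32_t mask, uint32_t want, unsigned int budget,
                         unsigned int *polls)
    {
            for (unsigned int i = 0; i < budget; i++) {
                    /* a real driver would udelay(1) between reads */
                    if ((read_reg() & mask) == want) {
                            *polls = i + 1;
                            return 0;
                    }
            }
            *polls = budget;
            return -1;
    }

    int main(void)
    {
            unsigned int polls;
            int rc = poll_bits(0x100, 0x100, 1000, &polls);

            printf("rc=%d after %u polls\n", rc, polls);
            return 0;
    }
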
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *base_addr;
+       u32     value;
+       u32     offset;
+       u32     pcibar;
+       u32     pcilogic;
+
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+       offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+                               offset, value));
+       pcilogic = (value & 0xFC000000) >> 26;
+       pcibar = get_pci_bar_index(pcilogic);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+       pm8001_ha->main_cfg_tbl_addr = base_addr =
+               pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+       pm8001_ha->general_stat_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+                                       0xFFFFFF);
+       pm8001_ha->inbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+                                       0xFFFFFF);
+       pm8001_ha->outbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+                                       0xFFFFFF);
+       pm8001_ha->ivt_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+                                       0xFFFFFF);
+       pm8001_ha->pspa_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+                                       0xFFFFFF);
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("GST OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("INBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("OBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("IVT OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("PSPA OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - main cfg %p general status %p\n",
+                       pm8001_ha->main_cfg_tbl_addr,
+                       pm8001_ha->general_stat_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - inbnd %p obnd %p\n",
+                       pm8001_ha->inbnd_q_tbl_addr,
+                       pm8001_ha->outbnd_q_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - pspa %p ivt %p\n",
+                       pm8001_ha->pspa_q_tbl_addr,
+                       pm8001_ha->ivt_tbl_addr));
+}
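
    init_pci_device_addresses splits MSGU_SCRATCH_PAD_0 into a logical PCI BAR
    selector in the top six bits and a byte offset into that BAR in the low
    26 bits, then locates the other tables from offsets stored just past the
    main config table. The bit split in isolation, using an arbitrary sample
    register value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t scratch0 = 0x44001000;   /* arbitrary sample value */

            unsigned int bar_logical = (scratch0 & 0xFC000000) >> 26; /* top 6 bits  */
            unsigned int offset      =  scratch0 & 0x03FFFFFF;        /* low 26 bits */

            printf("logical BAR %u, main config table at offset 0x%x\n",
                   bar_logical, offset);
            return 0;
    }
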
+
+/**
+ * pm80xx_set_thermal_config - support the thermal configuration
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+                       (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+       payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return rc;
+
+}
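
    pm80xx_set_thermal_config builds the controller-configuration page inline:
    the page code in the low byte of cfg_pg[0], the enable and log-enable bits
    at positions 8 and 9, and the two high-temperature thresholds packed into
    cfg_pg[1]. The packing on its own, using placeholder values instead of the
    THERMAL_* and *TEMPHIL constants from pm80xx_hwi.h:

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_code  = 0xA5;  /* placeholder for THERMAL_OP_CODE    */
            unsigned int enable     = 1;     /* placeholder for THERMAL_ENABLE     */
            unsigned int log_enable = 1;     /* placeholder for THERMAL_LOG_ENABLE */
            unsigned int local_hi   = 70;    /* placeholder for LTEMPHIL           */
            unsigned int remote_hi  = 70;    /* placeholder for RTEMPHIL           */

            unsigned int cfg0 = (log_enable << 9) | (enable << 8) | page_code;
            unsigned int cfg1 = (local_hi << 24) | (remote_hi << 8);

            printf("cfg_pg[0]=0x%08x cfg_pg[1]=0x%08x\n", cfg0, cfg1);
            return 0;
    }
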
+
+/**
+ * pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
+ * Timer configuration page
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       SASProtocolTimerConfig_t SASConfigPage;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+
+       SASConfigPage.pageCode        =  SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+       SASConfigPage.MST_MSI         =  3 << 15;
+       SASConfigPage.STP_SSP_MCT_TMO =  (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+       SASConfigPage.STP_FRM_TMO     = (SAS_MAX_OPEN_TIME << 24) |
+                               (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+       SASConfigPage.STP_IDLE_TMO    =  STP_IDLE_TIME;
+
+       if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+               SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+
+       SASConfigPage.OPNRJT_RTRY_INTVL =         (SAS_MFD << 16) |
+                                               SAS_OPNRJT_RTRY_INTVL;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =  (SAS_DOPNRJT_RTRY_TMO << 16)
+                                               | SAS_COPNRJT_RTRY_TMO;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =  (SAS_DOPNRJT_RTRY_THR << 16)
+                                               | SAS_COPNRJT_RTRY_THR;
+       SASConfigPage.MAX_AIP =  SAS_MAX_AIP;
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.pageCode "
+                       "0x%08x\n", SASConfigPage.pageCode));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.MST_MSI "
+                       " 0x%08x\n", SASConfigPage.MST_MSI));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_FRM_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+                       " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+       PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+                       " 0x%08x\n", SASConfigPage.MAX_AIP));
+
+       memcpy(&payload.cfg_pg, &SASConfigPage,
+                        sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 scratch3_value;
+       int ret;
+
+       /* Read encryption status from SCRATCH PAD 3 */
+       scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+       if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                       SCRATCH_PAD3_ENC_READY) {
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               pm8001_ha->encrypt_info.status = 0;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+                       "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+                                       SCRATCH_PAD3_ENC_DISABLED) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+                       scratch3_value));
+               pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+               pm8001_ha->encrypt_info.cipher_mode = 0;
+               pm8001_ha->encrypt_info.sec_mode = 0;
+               return 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                               SCRATCH_PAD3_ENC_DIS_ERR) {
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                SCRATCH_PAD3_ENC_ENA_ERR) {
+
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       }
+       return ret;
+}
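
    pm80xx_get_encrypt_info classifies the controller by masking
    MSGU_SCRATCH_PAD_3: one field reports whether encryption is ready,
    disabled or in error, another selects the security mode, and a single bit
    flags XTS cipher mode. The masked-equality pattern is shown below with
    invented bit assignments used purely for illustration; the real masks are
    the SCRATCH_PAD3_* definitions in pm80xx_hwi.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented bit layout for illustration only. */
    #define ENC_MASK     0x00000003u
    #define ENC_READY    0x00000001u
    #define SM_MASK      0x0000000Cu
    #define SM_A         0x00000004u
    #define SM_B         0x00000008u
    #define XTS_ENABLED  0x00000010u

    int main(void)
    {
            uint32_t scratch3 = ENC_READY | SM_B | XTS_ENABLED;   /* sample value */

            if ((scratch3 & ENC_MASK) == ENC_READY) {
                    const char *sec = (scratch3 & SM_MASK) == SM_A ? "SMA" :
                                      (scratch3 & SM_MASK) == SM_B ? "SMB" : "SMF";
                    printf("encryption ready, sec mode %s, XTS=%d\n",
                           sec, !!(scratch3 & XTS_ENABLED));
            } else {
                    printf("encryption not ready (0x%08x)\n", (unsigned int)scratch3);
            }
            return 0;
    }
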
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption information
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+       struct kek_mgmt_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+       memset(&payload, 0, sizeof(struct kek_mgmt_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       /* Currently only one key is used. New KEK index is 1.
+        * Current KEK index is 1. Store KEK to NVRAM is 1.
+        */
+       payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+                                       KEK_MGMT_SUBOP_KEYCARDUPDATE);
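+       /*
+        * Assumed layout of new_curidx_ksop, inferred only from the shifts
+        * above and the preceding comment (not from a programming guide):
+        * bits 31:24 = new KEK index, bits 23:16 = current KEK index,
+        * bits 15:8 = store-KEK-to-NVRAM flag, bits 7:0 = KEK management
+        * sub-operation.
+        */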
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the PM8001 chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+       int ret;
+       u8 i = 0;
+
+       /* check the firmware status */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+
+       /* Initialize pci space address eg: mpi offset */
+       init_pci_device_addresses(pm8001_ha);
+       init_default_table_values(pm8001_ha);
+       read_main_config_table(pm8001_ha);
+       read_general_status_table(pm8001_ha);
+       read_inbnd_queue_table(pm8001_ha);
+       read_outbnd_queue_table(pm8001_ha);
+       read_phy_attr_table(pm8001_ha);
+
+       /* update main config table, inbound table and outbound table */
+       update_main_config_table(pm8001_ha);
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+
+       /* notify firmware update finished and check initialization status */
+       if (0 == mpi_init_check(pm8001_ha)) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MPI initialize successful!\n"));
+       } else
+               return -EBUSY;
+
+       /* send SAS protocol timer configuration page to FW */
+       ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+       /* Check for encryption */
+       if (pm8001_ha->chip->encrypt) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("Checking for encryption\n"));
+               ret = pm80xx_get_encrypt_info(pm8001_ha);
+               if (ret == -1) {
+                       PM8001_INIT_DBG(pm8001_ha,
+                               pm8001_printk("Encryption error !!\n"));
+                       if (pm8001_ha->encrypt_info.status == 0x81) {
+                               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                                       "Encryption enabled with error. "
+                                       "Saving encryption key to flash\n"));
+                               pm80xx_encrypt_update(pm8001_ha);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+       init_pci_device_addresses(pm8001_ha);
+       /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW
+        * that the configuration table is stopped */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_RESET;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+               return -1;
+       }
+
+       /* check the MPI-State for termination in progress */
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                       GST_GSTLEN_MPIS_OFFSET);
+               if (GST_MPI_STATE_UNINIT ==
+                       (gst_len_mpistate & GST_MPI_STATE_MASK))
+                       break;
+       } while (--max_wait_count);
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+                               gst_len_mpistate & GST_MPI_STATE_MASK));
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all the FW
+ * register status is cleared back to its original state.
+ * @pm8001_ha: our hba card information
+ */
+
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 regval;
+       u32 bootloader_state;
+
+       /* Check if MPI is in ready state to reset */
+       if (mpi_uninit_check(pm8001_ha) != 0) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("MPI state is not ready\n"));
+               return -1;
+       }
+
+       /* check for reset register normal state; 0x0 */
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("reset register before write : 0x%x\n", regval));
+
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+       mdelay(500);
+
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+       pm8001_printk("reset register after write 0x%x\n", regval));
+
+       if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+                       SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+                                       regval));
+       } else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+                                       regval));
+
+               /* check bootloader is successfully executed or in HDA mode */
+               bootloader_state =
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+                       SCRATCH_PAD1_BOOTSTATE_MASK;
+
+               if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode SEEPROM\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode Bootstrap Pin\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode soft reset\n"));
+               } else if (bootloader_state ==
+                                       SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state-HDA mode critical error\n"));
+               }
+               return -EBUSY;
+       }
+
+       /* check the firmware status after reset */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPCv soft reset Complete\n"));
+       return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+        u32 i;
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset start\n"));
+
+       /* do SPCv chip reset. */
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPC soft reset Complete\n"));
+
+       /* Check whether this delay is required or not */
+       /* delay 10 usec */
+       udelay(10);
+
+       /* wait for 20 msec until the firmware gets reloaded */
+       i = 20;
+       do {
+               mdelay(1);
+       } while ((--i) != 0);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
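+       /*
+        * ODMR/ODCR are read here as the Outbound Doorbell Mask/Clear
+        * registers (an inference from the identifiers, not from a
+        * datasheet): clearing the mask unmasks the INT-X interrupt and
+        * clearing ODCR acknowledges any pending doorbell status.
+        */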
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector number
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       mask = (u32)(1 << vec);
+
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector number
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       if (vec == 0xFF)
+               mask = 0xFFFFFFFF;
+       else
+               mask = (u32)(1 << vec);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               sas_free_task(task);
+               return;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               sas_free_task(task);
+               return;
+       }
+
+       /* allocate domain device by ourselves as libsas
+        * is not going to provide any
+       */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
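+       /*
+        * Flag the device so that the SATA completion path can recognize this
+        * internal READ LOG EXT and follow it with an abort-all (see the
+        * NCQ_READ_LOG_FLAG handling in mpi_sata_completion below).
+        */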
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
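+       /*
+        * Standard ATA/SATA values: 0x27 is the host-to-device Register FIS
+        * type, 0x80 sets its Command bit, lbal = 0x10 selects log page 10h
+        * (the NCQ command error log) and sector_count requests one sector.
+        */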
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
+/**
+ * mpi_ssp_completion - process the FW response event for an SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request, for example an IO request, and
+ * has filled the SG buffers with data, it triggers this event to signal that
+ * it has finished the job; please check the corresponding buffer.
+ * We then notify the caller, which may be waiting for the result, so that it
+ * can tell the upper layer that the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 param;
+       u32 tag;
+       struct ssp_completion_resp *psspPayload;
+       struct task_status_struct *ts;
+       struct ssp_response_iu *iu;
+       struct pm8001_device *pm8001_dev;
+       psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psspPayload->status);
+       tag = le32_to_cpu(psspPayload->tag);
+       ccb = &pm8001_ha->ccb_info[tag];
+       if ((status == IO_ABORTED) && ccb->open_retry) {
+               /* Being completed by another */
+               ccb->open_retry = 0;
+               return;
+       }
+       pm8001_dev = ccb->device;
+       param = le32_to_cpu(psspPayload->param);
+       t = ccb->task;
+
+       if (status && status != IO_UNDERFLOW)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SUCCESS, param = 0x%x\n",
+                               param));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+               } else {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       iu = &psspPayload->ssp_resp_iu;
+                       sas_ssp_task_response(pm8001_ha->dev, t, iu);
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_UNDERFLOW:
+               /* SSP Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW, param = 0x%x\n",
+                               param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               /* Force the midlayer to retry */
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_DS_NON_OPERATIONAL);
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_TM_TAG_NOT_FOUND:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       }
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("scsi_status = 0x%x\n",
+               psspPayload->ssp_resp_iu.status));
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       unsigned long flags;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct ssp_event_resp *psspPayload =
+               (struct ssp_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psspPayload->event);
+       u32 tag = le32_to_cpu(psspPayload->tag);
+       u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", event));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+               return;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+               return;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               return;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with event 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       u32 param;
+       u32 status;
+       u32 tag;
+       struct sata_completion_resp *psataPayload;
+       struct task_status_struct *ts;
+       struct ata_task_resp *resp;
+       u32 *sata_resp;
+       struct pm8001_device *pm8001_dev;
+       unsigned long flags;
+
+       psataPayload = (struct sata_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psataPayload->status);
+       tag = le32_to_cpu(psataPayload->tag);
+
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psataPayload->param);
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       if (!ts) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ts null\n"));
+               return;
+       }
+
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
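+                               /* 0x7FFFFFFF clears bit 31, assumed here to
+                                * be NCQ_READ_LOG_FLAG */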
+                               pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
+               } else {
+                       u8 len;
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+                               param));
+                       sata_resp = &psataPayload->sata_resp[0];
+                       resp = (struct ata_task_resp *)ts->buf;
+                       if (t->ata_task.dma_xfer == 0 &&
+                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                               len = sizeof(struct pio_setup_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("PIO read len = %d\n", len));
+                       } else if (t->ata_task.use_ncq) {
+                               len = sizeof(struct set_dev_bits_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("FPDMA len = %d\n", len));
+                       } else {
+                               len = sizeof(struct dev_to_host_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("other len = %d\n", len));
+                       }
+                       if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+                               resp->frame_len = len;
+                               memcpy(&resp->ending_fis[0], sata_resp, len);
+                               ts->buf_valid_size = sizeof(*resp);
+                       } else
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("response too large\n"));
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+               /* following cases are TODO cases */
+       case IO_UNDERFLOW:
+               /* SATA Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*in order to force CPU ordering*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/* ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_NON_OPERATIONAL);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_IN_ERROR);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct sata_event_resp *psataPayload =
+               (struct sata_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psataPayload->event);
+       u32 tag = le32_to_cpu(psataPayload->tag);
+       u32 port_id = le32_to_cpu(psataPayload->port_id);
+       u32 dev_id = le32_to_cpu(psataPayload->device_id);
+       unsigned long flags;
+
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB!!! Returning\n"));
+               return;
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm80xx_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
+       if (unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_PEER_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               break;
+       case IO_XFER_PIO_SETUP_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_DMA_ACTIVATE_TIMEOUT\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       u32 param, i;
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 tag;
+       struct smp_completion_resp *psmpPayload;
+       struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
+       char *pdma_respaddr = NULL;
+
+       psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psmpPayload->status);
+       tag = le32_to_cpu(psmpPayload->tag);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psmpPayload->param);
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (status)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("smp IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+
+       switch (status) {
+
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_GOOD;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("DIRECT RESPONSE Length:%d\n",
+                                               param));
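+                       /* In direct mode the response bytes arrive inline in
+                        * the IOMB (_r_a) and are copied into the mapped SMP
+                        * response buffer. */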
+                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+                                               ((u64)sg_dma_address
+                                               (&t->smp_task.smp_resp))));
+                       for (i = 0; i < param; i++) {
+                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+                                       i, *(pdma_respaddr+i),
+                                       psmpPayload->_r_a[i]));
+                       }
+               }
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_ERROR_HW_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_RX_FRAME:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_ERROR_INTERNAL_SMP_RESOURCE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_QUEUE_FULL;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               /* not allowed case. Therefore, return failed status */
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x"
+                       " stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/**
+ * pm80xx_hw_event_ack_req - acknowledge a hardware event; the FW expects some events to be explicitly acked.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the outbound queue message number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+       u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+       struct hw_event_ack_req  payload;
+       u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+       struct inbound_queue_table *circularQ;
+
+       memset((u8 *)&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+       payload.tag = cpu_to_le32(1);
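+       /* phyid_sea_portid packs the port id in bits 7:0, the source of
+        * event (SEA) in bits 23:8 and the phy id in bits 31:24. */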
+       payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+               ((phyId & 0xFF) << 24) | (port_id & 0xFF));
+       payload.param0 = cpu_to_le32(param0);
+       payload.param1 = cpu_to_le32(param1);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - handle a SAS phy up event reported by the FW.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
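+       /* Unpack the event dwords: the link rate sits in bits 31:28 and the
+        * port id in bits 7:0 of lr_status_evt_portid; the phy id sits in
+        * bits 23:16 and the port state in bits 3:0 of phyid_npip_portstate. */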
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       u8 deviceType = pPayload->sas_identify.dev_type;
+       port->port_state = portstate;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "portid:%d; phyid:%d; linkrate:%d; "
+               "portstate:%x; devicetype:%x\n",
+               port_id, phy_id, link_rate, portstate, deviceType));
+
+       switch (deviceType) {
+       case SAS_PHY_UNUSED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("device type no device.\n"));
+               break;
+       case SAS_END_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+               pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+                       PHY_NOTIFY_ENABLE_SPINUP);
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_EDGE_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_FANOUT_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("fanout expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unknown device type(%x)\n", deviceType));
+               break;
+       }
+       phy->phy_type |= PORT_TYPE_SAS;
+       phy->identify.device_type = deviceType;
+       phy->phy_attached = 1;
+       if (phy->identify.device_type == SAS_END_DEVICE)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+       phy->sas_phy.oob_mode = SAS_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+               sizeof(struct sas_identify_frame)-4);
+       phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       if (pm8001_ha->flags == PM8001F_RUN_TIME)
+               mdelay(200); /* delay a moment to wait for the disk to spin up */
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - handle a SATA phy up event reported by the FW.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+                               port_id, phy_id, link_rate, portstate));
+
+       port->port_state = portstate;
+       port->port_attached = 1;
+       pm8001_get_lrate_mode(phy, link_rate);
+       phy->phy_type |= PORT_TYPE_SATA;
+       phy->phy_attached = 1;
+       phy->sas_phy.oob_mode = SATA_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+               sizeof(struct dev_to_host_fis));
+       phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+       phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+       phy->identify.device_type = SAS_SATA_DEV;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that a phy has gone down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       port->port_state = portstate;
+       phy->phy_type = 0;
+       phy->identify.device_type = 0;
+       phy->phy_attached = 0;
+       memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
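+       /* Only the PORT_INVALID and PORT_LOSTCOMM cases below send an
+        * explicit HW_EVENT_PHY_DOWN acknowledgement back to the FW. */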
+       switch (portstate) {
+       case PORT_VALID:
+               break;
+       case PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" PortInvalid portID %d\n", port_id));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       case PORT_IN_RESET:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Port In Reset portID %d\n", port_id));
+               break;
+       case PORT_NOT_ESTABLISHED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+               port->port_attached = 0;
+               break;
+       case PORT_LOSTCOMM:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       default:
+               port->port_attached = 0;
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and(default) = 0x%x\n",
+                       portstate));
+               break;
+
+       }
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_start_resp *pPayload =
+               (struct phy_start_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phy_id =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+                               status, phy_id));
+       if (status == 0) {
+               phy->phy_state = 1;
+               if (pm8001_ha->flags == PM8001F_RUN_TIME)
+                       complete(phy->enable_completion);
+       }
+       return 0;
+
+}
+
+/**
+ * mpi_thermal_hw_event - handle a thermal hardware event from the FW.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct thermal_hw_event *pPayload =
+               (struct thermal_hw_event *)(piomb + 4);
+
+       u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+       u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
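+       /* thermal_event bit 6 flags a local high-temperature violation and
+        * bit 4 a remote one; the measured values are carried in rht_lht
+        * (local in bits 15:8, remote in bits 31:24). */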
+       if (thermal_event & 0x40) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Local high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured local high temperature %d\n",
+                               ((rht_lht & 0xFF00) >> 8)));
+       }
+       if (thermal_event & 0x10) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Remote high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured remote high temperature %d\n",
+                               ((rht_lht & 0xFF000000) >> 24)));
+       }
+       return 0;
+}
+
+/**
+ * mpi_hw_event - dispatch a hardware event reported by the FW.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       unsigned long flags;
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
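+       /* lr_status_evt_portid also carries the event type (bits 23:8) and
+        * its status (bits 27:24); the phy id comes from bits 23:16 of
+        * phyid_npip_portstate. */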
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u16 eventType =
+               (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+       u8 status =
+               (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+                               port_id, phy_id, eventType, status));
+
+       switch (eventType) {
+
+       case HW_EVENT_SAS_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
+               hw_event_sas_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+               hw_event_sata_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_SPINUP_HOLD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+               break;
+       case HW_EVENT_PHY_DOWN:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+               phy->phy_attached = 0;
+               phy->phy_state = 0;
+               hw_event_phy_down(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       /* A broadcast change primitive was received; tell libsas to
+        * revalidate the SAS domain. */
+       case HW_EVENT_BROADCAST_CHANGE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+                       port_id, phy_id, 1, 0);
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_PHY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+               sas_phy_disconnected(&phy->sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+               break;
+       case HW_EVENT_BROADCAST_EXP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_LINK_ERR_INVALID_DWORD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_CODE_VIOLATION,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_MALFUNCTION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+               break;
+       case HW_EVENT_BROADCAST_SES:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_INBOUND_CRC_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_INBOUND_CRC_ERROR,
+                       port_id, phy_id, 0, 0);
+               break;
+       case HW_EVENT_HARD_RESET_RECEIVED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+               sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+               break;
+       case HW_EVENT_ID_FRAME_TIMEOUT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RESET_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVER:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+               break;
+       case HW_EVENT_PORT_RESET_COMPLETE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+               break;
+       case EVENT_BROADCAST_ASYNCH_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event type 0x%x\n", eventType));
+               break;
+       }
+       return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_stop_resp *pPayload =
+               (struct phy_stop_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phyid =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("phy:0x%x status:0x%x\n",
+                                       phyid, status));
+       if (status == 0)
+               phy->phy_state = 0;
+       return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct set_ctrl_cfg_resp *pPayload =
+                       (struct set_ctrl_cfg_resp *)(piomb + 4);
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+                       status, err_qlfr_pgcd));
+
+       return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+       u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+               status, kidx_new_curr_ksop, err_qlfr));
+
+       return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       __le32 pHeader = *(__le32 *)piomb;
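+       /* The outbound opcode is carried in the low 12 bits of the first
+        * dword of the IOMB header. */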
+       u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
+
+       switch (opc) {
+       case OPC_OUB_ECHO:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+               break;
+       case OPC_OUB_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_HW_EVENT\n"));
+               mpi_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_THERM_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_THERM_HW_EVENT\n"));
+               mpi_thermal_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_COMP\n"));
+               mpi_ssp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SMP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_COMP\n"));
+               mpi_smp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_LOCAL_PHY_CNTRL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_REGIST:
+               PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEREG_DEV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unregister the device\n"));
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEV_HANDLE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+               break;
+       case OPC_OUB_SATA_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_COMP\n"));
+               mpi_sata_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+               mpi_sata_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+               mpi_ssp_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_HANDLE_ARRIV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_SSP_RECV_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_FW_FLASH_UPDATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GPIO_RESPONSE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+               break;
+       case OPC_OUB_GPIO_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+               break;
+       case OPC_OUB_GENERAL_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+               pm8001_mpi_general_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SAS_DIAG_MODE_START_END:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+               break;
+       case OPC_OUB_SAS_DIAG_EXECUTE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+               break;
+       case OPC_OUB_GET_TIME_STAMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+               break;
+       case OPC_OUB_SAS_HW_EVENT_ACK:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+               break;
+       case OPC_OUB_PORT_CONTROL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+               break;
+       case OPC_OUB_SMP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+               break;
+       case OPC_OUB_SET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+               break;
+       case OPC_OUB_SET_DEV_INFO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+               break;
+       /* SPCv specific commands */
+       case OPC_OUB_PHY_START_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+               mpi_phy_start_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_PHY_STOP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+               mpi_phy_stop_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_set_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_get_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_get_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_FLASH_OP_EXT:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+               mpi_flash_op_ext_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_set_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_KEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_kek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_dek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COALESCED_COMP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+               ssp_coalesced_comp_resp(pm8001_ha, piomb);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+               break;
+       }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       struct outbound_queue_table *circularQ;
+       void *pMsg1 = NULL;
+       u8 uninitialized_var(bc);
+       u32 ret = MPI_IO_STATUS_FAIL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pm8001_ha->lock, flags);
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
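+       /* Drain the outbound queue: keep consuming IOMBs until the
+        * firmware's producer index matches our consumer index. */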
+       do {
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               if (MPI_IO_STATUS_SUCCESS == ret) {
+                       /* process the outbound message */
+                       process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+                       /* free the message from the outbound circular buffer */
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
+               }
+               if (MPI_IO_STATUS_BUSY == ret) {
+                       /* Update the producer index from SPC */
+                       circularQ->producer_index =
+                               cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+                       if (le32_to_cpu(circularQ->producer_index) ==
+                               circularQ->consumer_idx)
+                               /* OQ is empty */
+                               break;
+               }
+       } while (1);
+       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+       return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
+       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
+       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+                       struct smp_req *psmp_cmd, int mode, int length)
+{
+       psmp_cmd->tag = hTag;
+       psmp_cmd->device_id = cpu_to_le32(deviceID);
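+       /* len_ip_ir carries the request length (minus CRC) in its upper
+        * 16 bits for direct mode; for indirect mode bits 0 and 1 are set,
+        * presumably the indirect-request/indirect-response flags. */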
+       if (mode == SMP_DIRECT) {
+               length = length - 4; /* subtract crc */
+               psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+       } else {
+               psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+       }
+}
+
+/**
+ * pm80xx_chip_smp_req - send an SMP task to the FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       int elem, rc;
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len;
+       struct smp_req smp_cmd;
+       u32 opc;
+       struct inbound_queue_table *circularQ;
+       char *preq_dma_addr = NULL;
+       __le64 tmp_addr;
+       u32 i, length;
+
+       memset(&smp_cmd, 0, sizeof(smp_cmd));
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       resp_len = sg_dma_len(sg_resp);
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       opc = OPC_INB_SMP_REQUEST;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+       length = sg_req->length;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("SMP Frame Length %d\n", sg_req->length));
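+       /* A frame of exactly 8 bytes (4-byte SMP header plus CRC, with no
+        * additional request bytes) is assumed to fit direct mode; larger
+        * frames would go indirect. */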
+       if (!(length - 8))
+               pm8001_ha->smp_exp_mode = SMP_DIRECT;
+       else
+               pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+       /* DIRECT MODE support only in spcv/ve */
+       pm8001_ha->smp_exp_mode = SMP_DIRECT;
+
+       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+       /* INDIRECT MODE command settings. Use DMA */
+       if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+               /* for SPCv indirect mode. Place the top 4 bytes of
+                * SMP Request header here. */
+               for (i = 0; i < 4; i++)
+                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+               /* exclude top 4 bytes for SMP req header */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_req) - 4);
+               /* exclude 4 bytes for SMP req header and CRC */
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+               smp_cmd.long_smp_req.long_resp_addr =
+                               cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                               cpu_to_le32((u32)sg_dma_len
+                                       (&task->smp_task.smp_resp)-4);
+       } else { /* DIRECT MODE */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_req));
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+               smp_cmd.long_smp_req.long_resp_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                       cpu_to_le32
+                       ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+       }
+       if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST DIRECT MODE\n"));
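+               /*
+                * Copy the request frame into the IOMB: the first 16 bytes
+                * go into smp_req16, the remainder into smp_req.
+                */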
+               for (i = 0; i < length; i++)
+                       if (i < 16) {
+                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req16[i],
+                                       *(preq_dma_addr)));
+                       } else {
+                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req[i],
+                                       *(preq_dma_addr)));
+                       }
+       }
+
+       build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+                               &smp_cmd, pm8001_ha->smp_exp_mode, length);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+                       PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+                       PCI_DMA_TODEVICE);
+       return rc;
+}
+
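+/*
+ * Return 1 if the SCSI CDB opcode is a read/write command that is sent
+ * via the SSP DIF/encryption opcode when controller encryption is enabled.
+ */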
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+       if ((task->ssp_task.cdb[0] == READ_10)
+               || (task->ssp_task.cdb[0] == WRITE_10)
+               || (task->ssp_task.cdb[0] == WRITE_VERIFY))
+               return 1;
+       else
+               return 0;
+}
+
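+/*
+ * Return 1 if the ATA command is a read/write command that is sent
+ * via the SATA DIF/encryption opcode when controller encryption is enabled.
+ */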
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+       int ret = 0;
+       switch (task->ata_task.fis.command) {
+       case ATA_CMD_FPDMA_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_READ:
+       case ATA_CMD_FPDMA_WRITE:
+       case ATA_CMD_WRITE_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_PIO_READ:
+       case ATA_CMD_PIO_READ_EXT:
+       case ATA_CMD_PIO_WRITE:
+       case ATA_CMD_PIO_WRITE_EXT:
+               ret = 1;
+               break;
+       default:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct ssp_ini_io_start_req ssp_cmd;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       u64 phys_addr;
+       struct inbound_queue_table *circularQ;
+       static u32 inb;
+       static u32 outb;
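+       /*
+        * outb selects the outbound queue used for this command's
+        * completion and is rotated per command; inb is only used in
+        * the debug message below.
+        */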
+       u32 opc = OPC_INB_SSPINIIOSTART;
+       memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+       memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+       /* data address domain added for spcv; set to 0 by host,
+        * used internally by controller
+        * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+        */
+       ssp_cmd.dad_dir_m_tlr =
+               cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+       ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+       ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+       ssp_cmd.tag = cpu_to_le32(tag);
+       if (task->ssp_task.enable_first_burst)
+               ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+       memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SAS command 0x%x\n",
+                       task->ssp_task.cdb[0]));
+               opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+               /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+               ssp_cmd.dad_dir_m_tlr = cpu_to_le32
+                       ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.enc_addr_low = 0;
+                       ssp_cmd.enc_addr_high = 0;
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               ssp_cmd.key_cmode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
+                                               (task->ssp_task.cdb[3] << 16) |
+                                               (task->ssp_task.cdb[4] << 8) |
+                                               (task->ssp_task.cdb[5]));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SAS command 0x%x inb q %x\n",
+                       task->ssp_task.cdb[0], inb));
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+                                       ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.addr_low = 0;
+                       ssp_cmd.addr_high = 0;
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               }
+       }
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+
+       return ret;
+}
+
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       static u32 inb;
+       static u32 outb;
+       struct sata_start_req sata_cmd;
+       u32 hdr_tag, ncg_tag = 0;
+       u64 phys_addr;
+       u32 ATAP = 0x0;
+       u32 dir;
+       struct inbound_queue_table *circularQ;
+       unsigned long flags;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       if (task->data_dir == PCI_DMA_NONE) {
+               ATAP = 0x04; /* no data*/
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+       } else if (likely(!task->ata_task.device_control_reg_update)) {
+               if (task->ata_task.dma_xfer) {
+                       ATAP = 0x06; /* DMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+               } else {
+                       ATAP = 0x05; /* PIO*/
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+               }
+               if (task->ata_task.use_ncq &&
+                       dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+                       ATAP = 0x07; /* FPDMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+               }
+       }
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+               ncg_tag = hdr_tag;
+       }
+       dir = data_dir_flags[task->data_dir] << 8;
+       sata_cmd.tag = cpu_to_le32(tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+       sata_cmd.sata_fis = task->ata_task.fis;
+       if (likely(!task->ata_task.device_control_reg_update))
+               sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+       sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SATA cmd 0x%x\n",
+                       sata_cmd.sata_fis.command));
+               opc = OPC_INB_SATA_DIF_ENC_IO;
+
+               /* set encryption bit */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16)|
+                               ((ATAP & 0x3f) << 10) | 0x20 | dir);
+                                                       /* dad (bit 0-1) is 0 */
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.enc_addr_low = 0;
+                       sata_cmd.enc_addr_high = 0;
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               sata_cmd.key_index_mode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               sata_cmd.twk_val0 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+                                       (sata_cmd.sata_fis.lbah << 16) |
+                                       (sata_cmd.sata_fis.lbam << 8) |
+                                       (sata_cmd.sata_fis.lbal));
+               sata_cmd.twk_val1 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+                                        (sata_cmd.sata_fis.lbam_exp));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SATA command 0x%x inb %x\n",
+                       sata_cmd.sata_fis.command, inb));
+               /* dad (bit 0-1) is 0 */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16) |
+                                       ((ATAP & 0x3f) << 10) | dir);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                       ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.addr_low = 0;
+                       sata_cmd.addr_high = 0;
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               }
+               /* copy the 16-byte ATAPI packet (SCSI CDB) into four
+                * little-endian dwords */
+               sata_cmd.atapi_scsi_cdb[0] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+                       (task->ata_task.atapi_packet[1] << 8) |
+                       (task->ata_task.atapi_packet[2] << 16) |
+                       (task->ata_task.atapi_packet[3] << 24)));
+               sata_cmd.atapi_scsi_cdb[1] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+                       (task->ata_task.atapi_packet[5] << 8) |
+                       (task->ata_task.atapi_packet[6] << 16) |
+                       (task->ata_task.atapi_packet[7] << 24)));
+               sata_cmd.atapi_scsi_cdb[2] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+                       (task->ata_task.atapi_packet[9] << 8) |
+                       (task->ata_task.atapi_packet[10] << 16) |
+                       (task->ata_task.atapi_packet[11] << 24)));
+               sata_cmd.atapi_scsi_cdb[3] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+                       (task->ata_task.atapi_packet[13] << 8) |
+                       (task->ata_task.atapi_packet[14] << 16) |
+                       (task->ata_task.atapi_packet[15] << 24)));
+       }
+
+       /*
+        * READ LOG EXT (0x2f) for a device flagged for internal NCQ error
+        * recovery is completed locally instead of being sent to the
+        * firmware.
+        */
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               return 0;
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+                                               &sata_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we wanted to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+       struct phy_start_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTART;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+       /*
+        ** [0:7]       PHY Identifier
+        ** [8:11]      link rate 1.5G, 3G, 6G
+        ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+        ** [14]        0b disable spin up hold; 1b enable spin up hold
+        ** [15]        0b no change in current PHY analog setup; 1b enable using SPASTI
+        */
+       payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                       LINKMODE_AUTO | LINKRATE_15 |
+                       LINKRATE_30 | LINKRATE_60 | phy_id);
+       /* SSC Disable and SAS Analog ST configuration */
+       /**
+       payload.ase_sh_lm_slr_phyid =
+               cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+               LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+               phy_id);
+       Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+       **/
+
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
+       payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+       memcpy(payload.sas_identify.sas_addr,
+               pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+       payload.sas_identify.phy_id = phy_id;
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+       u8 phy_id)
+{
+       struct phy_stop_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTOP;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+       payload.phy_id = cpu_to_le32(phy_id);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_device *pm8001_dev, u32 flag)
+{
+       struct reg_dev_req payload;
+       u32     opc;
+       u32 stp_sspsmp_sata = 0x4;
+       struct inbound_queue_table *circularQ;
+       u32 linkrate, phy_id;
+       int rc, tag = 0xdeadbeef;
+       struct pm8001_ccb_info *ccb;
+       u8 retryFlag = 0x1;
+       u16 firstBurstSize = 0;
+       u16 ITNT = 2000;
+       struct domain_device *dev = pm8001_dev->sas_device;
+       struct domain_device *parent_dev = dev->parent;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&payload, 0, sizeof(payload));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return rc;
+       ccb = &pm8001_ha->ccb_info[tag];
+       ccb->device = pm8001_dev;
+       ccb->ccb_tag = tag;
+       payload.tag = cpu_to_le32(tag);
+
+       if (flag == 1) {
+               stp_sspsmp_sata = 0x02; /*direct attached sata */
+       } else {
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
+                       stp_sspsmp_sata = 0x00; /* stp*/
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+                       stp_sspsmp_sata = 0x01; /*ssp or smp*/
+       }
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+               phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+       else
+               phy_id = pm8001_dev->attached_phy;
+
+       opc = OPC_INB_REG_DEV;
+
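+       /* Register at the lower of the device link rate and the port link rate */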
+       linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+                       pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+       payload.phyid_portid =
+               cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+               ((phy_id & 0xFF) << 8));
+
+       payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+               ((linkrate & 0x0F) << 24) |
+               ((stp_sspsmp_sata & 0x03) << 28));
+       payload.firstburstsize_ITNexustimeout =
+               cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+       memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+               SAS_ADDR_SIZE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id which we want to operate on.
+ * @phy_op: the local phy operation to perform.
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op)
+{
+       struct local_phy_ctl_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+       memset(&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(1);
+       payload.phyop_phyid =
+               cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return ret;
+}
+
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+#ifdef PM8001_USE_MSIX
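+       /* MSI-X vectors are dedicated to this HBA, so the interrupt is always ours */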
+       return 1;
+#endif
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+       if (value)
+               return 1;
+       return 0;
+
+}
+
+/**
+ * pm80xx_chip_isr - PM80xx isr handler.
+ * @pm8001_ha: our hba card information.
+ * @vec: the interrupt vector to service.
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+       return IRQ_HANDLED;
+}
+
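+/*
+ * Dispatch table for the SPCv/SPCve (80xx) controllers: chip-specific
+ * handlers mixed with pm8001_* helpers shared with the SPC.
+ */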
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+       .name                   = "pmc80xx",
+       .chip_init              = pm80xx_chip_init,
+       .chip_soft_rst          = pm80xx_chip_soft_rst,
+       .chip_rst               = pm80xx_hw_chip_rst,
+       .chip_iounmap           = pm8001_chip_iounmap,
+       .isr                    = pm80xx_chip_isr,
+       .is_our_interupt        = pm80xx_chip_is_our_interupt,
+       .isr_process_oq         = process_oq,
+       .interrupt_enable       = pm80xx_chip_interrupt_enable,
+       .interrupt_disable      = pm80xx_chip_interrupt_disable,
+       .make_prd               = pm8001_chip_make_sg,
+       .smp_req                = pm80xx_chip_smp_req,
+       .ssp_io_req             = pm80xx_chip_ssp_io_req,
+       .sata_req               = pm80xx_chip_sata_req,
+       .phy_start_req          = pm80xx_chip_phy_start_req,
+       .phy_stop_req           = pm80xx_chip_phy_stop_req,
+       .reg_dev_req            = pm80xx_chip_reg_dev_req,
+       .dereg_dev_req          = pm8001_chip_dereg_dev_req,
+       .phy_ctl_req            = pm80xx_chip_phy_ctl_req,
+       .task_abort             = pm8001_chip_abort_task,
+       .ssp_tm_req             = pm8001_chip_ssp_tm_req,
+       .get_nvmd_req           = pm8001_chip_get_nvmd_req,
+       .set_nvmd_req           = pm8001_chip_set_nvmd_req,
+       .fw_flash_update_req    = pm8001_chip_fw_flash_update_req,
+       .set_dev_state_req      = pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644 (file)
index 0000000..2b760ba
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions, and the following disclaimer,
+ *     without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *     substantially similar to the "NO WARRANTY" disclaimer below
+ *     ("Disclaimer") and any redistribution must be conditioned upon
+ *     including a substantially similar Disclaimer requirement for further
+ *     binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *     of any contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO                           1       /* 0x000 */
+#define OPC_INB_PHYSTART                       4       /* 0x004 */
+#define OPC_INB_PHYSTOP                                5       /* 0x005 */
+#define OPC_INB_SSPINIIOSTART                  6       /* 0x006 */
+#define OPC_INB_SSPINITMSTART                  7       /* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD                           8       /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT              9       /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART                  10      /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART                 11      /* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT                      15      /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE               16      /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE                 17      /* 0x011 */
+#define OPC_INB_SMP_REQUEST                    18      /* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT                      20      /* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1                          22      /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART              23      /* 0x017 */
+#define OPC_INB_SATA_ABORT                     24      /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL              25      /* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2                          26      /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE                        32      /* 0x020 */
+#define OPC_INB_GPIO                           34      /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END                35      /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE               36      /* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3                          37      /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP                 38      /* 0x026 */
+#define OPC_INB_PORT_CONTROL                   39      /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA                  40      /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA                  41      /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE               42      /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE               43      /* 0x02B */
+#define OPC_INB_SET_DEV_INFO                   44      /* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4                          45      /* 0x02D */
+#define OPC_INB_SGPIO_REGISTER                 46      /* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC                 47      /* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG          48      /* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG          49      /* 0x031 */
+#define OPC_INB_REG_DEV                                50      /* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK               51      /* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO                        52      /* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE                        53      /* 0x035 */
+#define OPC_INB_FLASH_OP_EXT                   54      /* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE                        55      /* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT                 256     /* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT                 257     /* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO             258     /* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO                        259     /* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO                                   1       /* 0x001 */
+#define OPC_OUB_RSVD                                   4       /* 0x004 */
+#define OPC_OUB_SSP_COMP                               5       /* 0x005 */
+#define OPC_OUB_SMP_COMP                               6       /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL                                7       /* 0x007 */
+#define OPC_OUB_RSVD1                                  10      /* 0x00A */
+#define OPC_OUB_DEREG_DEV                              11      /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE                         12      /* 0x00C */
+#define OPC_OUB_SATA_COMP                              13      /* 0x00D */
+#define OPC_OUB_SATA_EVENT                             14      /* 0x00E */
+#define OPC_OUB_SSP_EVENT                              15      /* 0x00F */
+#define OPC_OUB_RSVD2                                  16      /* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT                         18      /* 0x012 */
+#define OPC_OUB_RSVD3                                  19      /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE                                20      /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE                          22      /* 0x016 */
+#define OPC_OUB_GPIO_EVENT                             23      /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT                          24      /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP                          26      /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP                         27      /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END                        28      /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE                       29      /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP                         30      /* 0x01E */
+#define OPC_OUB_RSVD4                                  31      /* 0x01F */
+#define OPC_OUB_PORT_CONTROL                           32      /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY                             33      /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP                          34      /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA                          35      /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA                          36      /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL                  37      /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE                       38      /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE                       39      /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO                           40      /* 0x028 */
+#define OPC_OUB_RSVD5                                  41      /* 0x029 */
+#define OPC_OUB_HW_EVENT                               1792    /* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV                       1824    /* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT                         1840    /* 0x730 */
+#define OPC_OUB_SGPIO_RESP                             2094    /* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE                      2095    /* 0x82F */
+#define OPC_OUB_DEV_REGIST                             2098    /* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK                       2099    /* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO                                2100    /* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP                         2052    /* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP                          2053    /* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG                  2096    /* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG                  2097    /* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE                                2101    /* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT                           2102    /* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE                                2103    /* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP                    2304    /* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP                    2305    /* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP                        2306    /* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15                 (0x01 << 16)
+#define SSC_DISABLE_30                 (0x02 << 16)
+#define SSC_DISABLE_60                 (0x04 << 16)
+#define SAS_ASE                                (0x01 << 15)
+#define SPINHOLD_DISABLE               (0x00 << 14)
+#define SPINHOLD_ENABLE                        (0x01 << 14)
+#define LINKMODE_SAS                   (0x01 << 12)
+#define LINKMODE_DSATA                 (0x02 << 12)
+#define LINKMODE_AUTO                  (0x03 << 12)
+#define LINKRATE_15                    (0x01 << 8)
+#define LINKRATE_30                    (0x02 << 8)
+#define LINKRATE_60                    (0x06 << 8)
+
+/* Thermal related */
+#define        THERMAL_ENABLE                  0x1
+#define        THERMAL_LOG_ENABLE              0x1
+#define THERMAL_OP_CODE                        0x6
+#define LTEMPHIL                        70
+#define RTEMPHIL                       100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED      0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR       0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR       0x00000002
+#define SCRATCH_PAD3_ENC_READY         0x00000003
+#define SCRATCH_PAD3_ENC_MASK          SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED               (1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED               (1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED               (1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED               0
+#define SCRATCH_PAD3_SM_MASK                   0x000000F0
+#define SCRATCH_PAD3_ERR_CODE                  0x00FF0000
+
+#define SEC_MODE_SMF                           0x0
+#define SEC_MODE_SMA                           0x100
+#define SEC_MODE_SMB                           0x200
+#define CIPHER_MODE_ECB                                0x00000001
+#define CIPHER_MODE_XTS                                0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE           0x4
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
+#define STP_MCT_TMO                     32
+#define SSP_MCT_TMO                     32
+#define SAS_MAX_OPEN_TIME                              5
+#define SMP_MAX_CONN_TIMER              0xFF
+#define STP_FRM_TIMER                   0
+#define STP_IDLE_TIME                   5 /* 5 us; controller default */
+#define SAS_MFD                         0
+#define SAS_OPNRJT_RTRY_INTVL           2
+#define SAS_DOPNRJT_RTRY_TMO            128
+#define SAS_COPNRJT_RTRY_TMO            128
+
+/*
+  Making ORR bigger than IT NEXUS LOSS, which is 2000000us = 2 seconds.
+  Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5 where 128
+  is DOPNRJT_RTRY_TMO
+*/
+#define SAS_DOPNRJT_RTRY_THR            23438
+#define SAS_COPNRJT_RTRY_THR            23438
+#define SAS_MAX_AIP                     0x200000
+#define IT_NEXUS_TIMEOUT       0x7D0
+#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
+
+struct mpi_msg_hdr {
+       __le32  header; /* Bits [11:0] - Message operation code */
+       /* Bits [15:12] - Message Category */
+       /* Bits [21:16] - Outboundqueue ID for the
+       operation completion message */
+       /* Bits [23:22] - Reserved */
+       /* Bits [28:24] - Buffer Count, indicates how
+       many buffers are allocated for the message */
+       /* Bits [30:29] - Reserved */
+       /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to describe enable the phy (128 bytes)
+ */
+struct phy_start_req {
+       __le32  tag;
+       __le32  ase_sh_lm_slr_phyid;
+       struct sas_identify_frame sas_identify; /* 28 Bytes */
+       __le32 spasti;
+       u32     reserved[21];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Stop Command
+ * use to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+       __le32  tag;
+       __le32  phy_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+       u8      fis_type;       /* 0xA1*/
+       u8      n_i_pmport;
+       /* b7 : n Bit. Notification bit. If set device needs attention. */
+       /* b6 : i Bit. Interrupt Bit */
+       /* b5-b4: reserved2 */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u32     _r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+       u8      fis_type;       /* 0x5f */
+       u8      i_d_pmPort;
+       /* b7 : reserved */
+       /* b6 : i bit. Interrupt bit */
+       /* b5 : d bit. data transfer direction. set to 1 for device to host
+       xfer */
+       /* b4 : reserved */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u8      lbal;
+       u8      lbam;
+       u8      lbah;
+       u8      device;
+       u8      lbal_exp;
+       u8      lbam_exp;
+       u8      lbah_exp;
+       u8      _r_a;
+       u8      sector_count;
+       u8      sector_count_exp;
+       u8      _r_b;
+       u8      e_status;
+       u8      _r_c[2];
+       u8      transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u32     sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event(64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+       __le32  lr_status_evt_portid;
+       __le32  evt_param;
+       __le32  phyid_npip_portstate;
+       struct sas_identify_frame       sas_identify;
+       struct dev_to_host_fis  sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+       __le32  thermal_event;
+       __le32  rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+       __le32  tag;
+       __le32  phyid_portid;
+       __le32  dtype_dlr_mcn_ir_retry;
+       __le32  firstburstsize_ITNexustimeout;
+       u8      sas_addr[SAS_ADDR_SIZE];
+       __le32  upper_device_id;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+       __le32  tag;
+       __le32  device_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  device_id;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+       __le32  tag;
+       __le32  phyop_phyid;
+       u32     reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+struct local_phy_ctl_resp {
+       __le32  tag;
+       __le32  phyop_phyid;
+       __le32  status;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+       __le32  tag;
+       __le32  portop_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+       __le32  tag;
+       __le32  phyid_sea_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       __le32  ssptag_rescv_rescpad;
+       struct ssp_response_iu ssp_resp_iu;
+       __le32  residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT  0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * use to indicate a SATA Completion (64 bytes)
+ */
+struct sata_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       u32 reserved;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sata_addr_h32;
+       __le32 sata_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * use to indicate a SSP Completion (64 bytes)
+ */
+struct ssp_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       __le32 ssp_tag;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sas_addr_h32;
+       __le32 sas_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+       __le32  status;
+       __le32  inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD  14
+#define OPCODE_BITS    0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  len_ip_ir;
+       /* Bits [0] - Indirect response */
+       /* Bits [1] - Indirect Payload */
+       /* Bits [15:2] - Reserved */
+       /* Bits [23:16] - direct payload Len */
+       /* Bits [31:24] - Reserved */
+       u8      smp_req16[16];
+       union {
+               u8      smp_req[32];
+               struct {
+                       __le64 long_req_addr;/* sg dma address, LE */
+                       __le32 long_req_size;/* LE */
+                       u32     _r_a;
+                       __le64 long_resp_addr;/* sg dma address, LE */
+                       __le32 long_resp_size;/* LE */
+                       u32     _r_b;
+                       } long_smp_req;/* sequencer extension */
+       };
+       __le32  rsvd[16];
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u8      _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  tag_to_abort;
+       __le32  abort_all;
+       u32     reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK             0x3
+#define ABORT_SINGLE           0x0
+#define ABORT_ALL              0x1
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
+ */
+struct task_abort_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  scp;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+       __le32  tag;
+       __le32  operation_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+       __le32  tag;
+       __le32  cmdtype_cmddesc_phyid;
+       __le32  pat1_pat2;
+       __le32  threshold;
+       __le32  codepat_errmsk;
+       __le32  pmon;
+       __le32  pERF1CTL;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  nds;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  ncqtag_atap_dir_m_dad;
+       struct host_to_dev_fis  sata_fis;
+       u32     reserved1;
+       u32     reserved2;      /* dword 11. rsvd for normal I/O. */
+                               /* EPLE Descl for enc I/O */
+       u32     addr_low;       /* dword 12. rsvd for enc I/O */
+       u32     addr_high;      /* dword 13. reserved for enc I/O */
+       __le32  len;            /* dword 14: length for normal I/O. */
+                               /* EPLE Desch for enc I/O */
+       __le32  esgl;           /* dword 15. rsvd for enc I/O */
+       __le32  atapi_scsi_cdb[4];      /* dword 16-19. rsvd for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       __le32  key_index_mode; /* dword 20 */
+       __le32  sector_cnt_enss;/* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28. Encryption SGL address high */
+       __le32  enc_addr_high;  /* dword 29. Encryption SGL address low */
+       __le32  enc_len;        /* dword 30. Encryption length */
+       __le32  enc_esgl;       /* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  relate_tag;
+       __le32  tmf;
+       u8      lun[8];
+       __le32  ds_ads_m;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+       u8      lun[8];/* SCSI Logical Unit Number */
+       u8      reserved1;/* reserved */
+       u8      efb_prio_attr;
+       /* B7 : enabledFirstBurst */
+       /* B6-3 : taskPriority */
+       /* B2-0 : taskAttribute */
+       u8      reserved2;      /* reserved */
+       u8      additional_cdb_len;
+       /* B7-2 : additional_cdb_len */
+       /* B1-0 : reserved */
+       u8      cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dad_dir_m_tlr;
+       struct ssp_info_unit    ssp_iu;
+       __le32  addr_low;       /* dword 12: sgl low for normal I/O. */
+                               /* epl_descl for encryption I/O */
+       __le32  addr_high;      /* dword 13: sgl hi for normal I/O */
+                               /* dpl_descl for encryption I/O */
+       __le32  len;            /* dword 14: len for normal I/O. */
+                               /* edpl_desch for encryption I/O */
+       __le32  esgl;           /* dword 15: ESGL bit for normal I/O. */
+                               /* user defined tag mask for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       u8      udt[12];        /* dword 16-18 */
+       __le32  sectcnt_ios;    /* dword 19 */
+       __le32  key_cmode;      /* dword 20 */
+       __le32  ks_enss;        /* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28: Encryption sgl addr low */
+       __le32  enc_addr_high;  /* dword 29: Encryption sgl addr hi */
+       __le32  enc_len;        /* dword 30: Encryption length */
+       __le32  enc_esgl;       /* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * use to initiate SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dirMTlr;
+       __le32  sspiu0;
+       __le32  sspiu1;
+       __le32  sspiu2;
+       __le32  sspiu3;
+       __le32  sspiu4;
+       __le32  sspiu5;
+       __le32  sspiu6;
+       __le32  epl_des;
+       __le32  dpl_desl_ndplr;
+       __le32  dpl_desh;
+       __le32  uum_uuv_bss_difbits;
+       u8      udt[12];
+       __le32  sectcnt_ios;
+       __le32  key_cmode;
+       __le32  ks_enss;
+       __le32  keytagl;
+       __le32  keytagh;
+       __le32  twk_val0;
+       __le32  twk_val1;
+       __le32  twk_val2;
+       __le32  twk_val3;
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len;
+       __le32  esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+       __le32  tag;
+       __le32  cur_image_offset;
+       __le32  cur_image_len;
+       __le32  total_image_len;
+       u32     reserved0[7];
+       __le32  sgl_addr_lo;
+       __le32  sgl_addr_hi;
+       __le32  len;
+       __le32  ext_reserved;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+struct fw_flash_Update_resp {
+       __le32  tag;
+       __le32  status;
+       u32     reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_req {
+       __le32  tag;
+       __le32  cfg_pg[14];
+       u32     reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * use to get controller configuration page
+ */
+struct get_ctrl_cfg_req {
+       __le32  tag;
+       __le32  pgcd;
+       __le32  int_vec;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * use for KEK management
+ */
+struct kek_mgmt_req {
+       __le32  tag;
+       __le32  new_curidx_ksop;
+       u32     reserved;
+       __le32  kblob[12];
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+       __le32  tag;
+       __le32  kidx_dsop;
+       __le32  dekidx;
+       __le32  addr_l;
+       __le32  addr_h;
+       __le32  nent;
+       __le32  dbf_tblsize;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to set phy specific information
+ */
+struct set_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrieve phy specific information
+ */
+struct get_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       __le32  profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+       __le32  tag;
+       __le32  cmd;
+       __le32  offset;
+       __le32  len;
+       u32     reserved[7];
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len1;
+       __le32  ext;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE     0x0
+#define C_SEEPROM      0x1
+#define VPD_FLASH      0x4
+#define AAP1_RDUMP     0x5
+#define IOP_RDUMP      0x6
+#define EXPAN_ROM      0x7
+
+#define IPMode         0x80000000
+#define NVMD_TYPE      0x0000000F
+#define NVMD_STAT      0x0000FFFF
+#define NVMD_LEN       0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+       __le32          tag;
+       __le32          ir_tda_bn_dps_das_nvm;
+       __le32          dlen_status;
+       __le32          nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+       __le32          tag;
+       __le32          status;
+       u32             reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+       __le32          tag;
+       __le32          cmdtype_cmddesc_phyid;
+       __le32          Status;
+       __le32          ReportData;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+       __le32          tag;
+       __le32          status;
+       __le32          device_id;
+       __le32          pds_nds;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr_pgcd;
+       u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr;
+       __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kidx_new_curr_ksop;
+       __le32 err_qlfr;
+       u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kekidx_tbls_dsop;
+       __le32 dekidx;
+       __le32 err_qlfr;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+       __le32 tag;
+       __le32 cmd;
+       __le32 status;
+       __le32 epart_size;
+       __le32 epart_sect_size;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+       __le32 coal_cnt;
+       __le32 tag0;
+       __le32 ssp_tag0;
+       __le32 tag1;
+       __le32 ssp_tag1;
+       __le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+       __le32 pageCode;                        /* 0 */
+       __le32 MST_MSI;                         /* 1 */
+       __le32 STP_SSP_MCT_TMO;                 /* 2 */
+       __le32 STP_FRM_TMO;                     /* 3 */
+       __le32 STP_IDLE_TMO;                    /* 4 */
+       __le32 OPNRJT_RTRY_INTVL;               /* 5 */
+       __le32 Data_Cmd_OPNRJT_RTRY_TMO;        /* 6 */
+       __le32 Data_Cmd_OPNRJT_RTRY_THR;        /* 7 */
+       __le32 MAX_AIP;                         /* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START                   0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE           0x02
+#define HW_EVENT_PHY_STOP_STATUS               0x03
+#define HW_EVENT_SAS_PHY_UP                    0x04
+#define HW_EVENT_SATA_PHY_UP                   0x05
+#define HW_EVENT_SATA_SPINUP_HOLD              0x06
+#define HW_EVENT_PHY_DOWN                      0x07
+#define HW_EVENT_PORT_INVALID                  0x08
+#define HW_EVENT_BROADCAST_CHANGE              0x09
+#define HW_EVENT_PHY_ERROR                     0x0A
+#define HW_EVENT_BROADCAST_SES                 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR             0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED           0x0D
+#define HW_EVENT_MALFUNCTION                   0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT              0x0F
+#define HW_EVENT_BROADCAST_EXP                 0x10
+#define HW_EVENT_PHY_START_STATUS              0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD                0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR      0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION       0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH  0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED     0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO       0x17
+#define HW_EVENT_PORT_RECOVER                  0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO          0x19
+#define HW_EVENT_PORT_RESET_COMPLETE           0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT           0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED                   0x00
+#define PORT_VALID                             0x01
+#define PORT_LOSTCOMM                          0x02
+#define PORT_IN_RESET                          0x04
+#define PORT_3RD_PARTY_RESET                   0x07
+#define PORT_INVALID                           0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS                             0x00
+#define IO_ABORTED                             0x01
+#define IO_OVERFLOW                            0x02
+#define IO_UNDERFLOW                           0x03
+#define IO_FAILED                              0x04
+#define IO_ABORT_RESET                         0x05
+#define IO_NOT_VALID                           0x06
+#define IO_NO_DEVICE                           0x07
+#define IO_ILLEGAL_PARAMETER                   0x08
+#define IO_LINK_FAILURE                                0x09
+#define IO_PROG_ERROR                          0x0A
+
+#define IO_EDC_IN_ERROR                                0x0B
+#define IO_EDC_OUT_ERROR                       0x0C
+#define IO_ERROR_HW_TIMEOUT                    0x0D
+#define IO_XFER_ERROR_BREAK                    0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY            0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED       0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION               0x11
+#define IO_OPEN_CNX_ERROR_BREAK                                0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS                        0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION              0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED        0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY           0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION            0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR                        0x18
+#define IO_XFER_ERROR_NAK_RECEIVED                     0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT                  0x1A
+#define IO_XFER_ERROR_PEER_ABORTED                     0x1B
+#define IO_XFER_ERROR_RX_FRAME                         0x1C
+#define IO_XFER_ERROR_DMA                              0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT                   0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT                        0x1F
+#define IO_XFER_ERROR_SATA                             0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST              0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE                        0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE                 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT                     0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR              0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE                 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN                 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED            0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT                0x30
+
+/* The following error codes 0x31 and 0x32 are not used (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK   0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK        0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH                  0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN               0x35
+#define IO_XFER_CMD_FRAME_ISSUED                       0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE                 0x37
+#define IO_PORT_IN_RESET                               0x38
+#define IO_DS_NON_OPERATIONAL                          0x39
+#define IO_DS_IN_RECOVERY                              0x3A
+#define IO_TM_TAG_NOT_FOUND                            0x3B
+#define IO_XFER_PIO_SETUP_ERROR                                0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR                   0x3D
+#define IO_DS_IN_ERROR                                 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY             0x3F
+#define IO_ABORT_IN_PROGRESS                           0x40
+#define IO_ABORT_DELAYED                               0x41
+#define IO_INVALID_LENGTH                              0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT         0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED   0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO       0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST                0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE   0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED        0x48
+#define IO_DS_INVALID                                  0x49
+/* WARNING: the values are not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR    0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT           0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR       0x54
+#define MPI_IO_RQE_BUSY_FULL                   0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN           0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME     0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED       0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE                0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY              0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS                0x2040
+/*
+ * An encryption IO request failed due to DEK Key Tag mismatch.
+ * The key tag supplied in the encryption IOMB does not match with
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH      0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID       0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH           0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR   0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM              0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS   0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH                      0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH      0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH                0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH                  0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED                      0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL         0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND          0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH                0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED      0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL           0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE   0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this value as well;
+ * it is used as an index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC                       0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE             0x01
+#define SPCv_MSGU_CFG_TABLE_RESET              0x02
+#define SPCv_MSGU_CFG_TABLE_FREEZE             0x04
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x08
+#define MSGU_IBDB_SET                          0x00
+#define MSGU_HOST_INT_STATUS                   0x08
+#define MSGU_HOST_INT_MASK                     0x0C
+#define MSGU_IOPIB_INT_STATUS                  0x18
+#define MSGU_IOPIB_INT_MASK                    0x1C
+#define MSGU_IBDB_CLEAR                                0x20
+
+#define MSGU_MSGU_CONTROL                      0x24
+#define MSGU_ODR                               0x20
+#define MSGU_ODCR                              0x28
+
+#define MSGU_ODMR                              0x30
+#define MSGU_ODMR_U                            0x34
+#define MSGU_ODMR_CLR                          0x38
+#define MSGU_ODMR_CLR_U                                0x3C
+#define MSGU_OD_RSVD                           0x40
+
+#define MSGU_SCRATCH_PAD_0                     0x44
+#define MSGU_SCRATCH_PAD_1                     0x48
+#define MSGU_SCRATCH_PAD_2                     0x4C
+#define MSGU_SCRATCH_PAD_3                     0x50
+#define MSGU_HOST_SCRATCH_PAD_0                        0x54
+#define MSGU_HOST_SCRATCH_PAD_1                        0x58
+#define MSGU_HOST_SCRATCH_PAD_2                        0x5C
+#define MSGU_HOST_SCRATCH_PAD_3                        0x60
+#define MSGU_HOST_SCRATCH_PAD_4                        0x64
+#define MSGU_HOST_SCRATCH_PAD_5                        0x68
+#define MSGU_HOST_SCRATCH_PAD_6                        0x6C
+#define MSGU_HOST_SCRATCH_PAD_7                        0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL                  0xFFFFFFFF/* mask all
+                                       interrupt vectors */
+#define ODMR_CLEAR_ALL                 0       /* unmask all
+                                       interrupt vectors */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL                 0xFFFFFFFF /* clear all
+                                       interrupt vectors */
+/* MSI-X Interrupts */
+#define MSIX_TABLE_OFFSET              0x2000
+#define MSIX_TABLE_ELEMENT_SIZE                0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET  0xC
+#define MSIX_TABLE_BASE                        (MSIX_TABLE_OFFSET + \
+                                       MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE         0x1
+#define MSIX_INTERRUPT_ENABLE          0x0
+
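The per-vector interrupt control registers sit MSIX_TABLE_ELEMENT_SIZE bytes apart starting at MSIX_TABLE_BASE. A minimal sketch of the offset arithmetic; the function and the ioremapped 'msgu' pointer are assumptions, not the driver's actual code:

#include <linux/io.h>

/* Mask or unmask a single MSI-X vector by writing its control register. */
static void spcv_msix_set_mask(void __iomem *msgu, u32 vec, bool mask)
{
	u32 off = MSIX_TABLE_BASE + vec * MSIX_TABLE_ELEMENT_SIZE;

	writel(mask ? MSIX_INTERRUPT_DISABLE : MSIX_INTERRUPT_ENABLE, msgu + off);
	readl(msgu + off);	/* flush the posted write */
}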
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY         0x3
+#define SCRATCH_PAD_ILA_READY          0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS  0x0
+#define SCRATCH_PAD_IOP0_READY         0xC00
+#define SCRATCH_PAD_IOP1_READY         0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK            0x70    /* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS          0x0     /* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM     0x10    /* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP   0x20    /* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET   0x30    /* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR      0x40    /* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1              0x50    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2              0x60    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL           0x70    /* Fatal Error */
+
+/* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR               0x00    /* power on state */
+#define SCRATCH_PAD2_SFR               0x01    /* soft reset state */
+#define SCRATCH_PAD2_ERR               0x02    /* error state */
+#define SCRATCH_PAD2_RDY               0x03    /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST         0x04    /* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST                0x08    /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK                0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED          0x000003FC/* Scratch Pad2
+ Reserved bits 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK         0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK         0x00000003 /* State Mask bits */
+
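Putting the Scratch Pad 1 definitions together, firmware readiness can be decoded from a single register read. A hedged sketch; the helper below is an assumption, and the driver's actual poll loop and timeouts are not shown:

#include <linux/types.h>

/* True when the boot loader reports success and both the RAAE and IOP0
 * engines flag ready in MSGU Scratch Pad 1. */
static bool spcv_scratch_pad1_ready(u32 scratch_pad1)
{
	if ((scratch_pad1 & SCRATCH_PAD1_BOOTSTATE_MASK) !=
	    SCRATCH_PAD1_BOOTSTATE_SUCESS)
		return false;

	return (scratch_pad1 & SCRATCH_PAD_RAAE_READY) == SCRATCH_PAD_RAAE_READY &&
	       (scratch_pad1 & SCRATCH_PAD_IOP0_READY) == SCRATCH_PAD_IOP0_READY;
}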
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET          0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION                0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION               0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET            0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET          0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET                        0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET                        0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET                        0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET                0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x4C - RSVD */
+#define MAIN_EVENT_CRC_CHECK           0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI         0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO         0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE       0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION          0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI     0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO     0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE   0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION      0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT     0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET     0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET       0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET   0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER       0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY     0x98 /* DWORD 0x26 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET         0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET    0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET    0x08
+#define GST_MSGUTCNT_OFFSET            0x0C
+#define GST_IOPTCNT_OFFSET             0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL             0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0           0x44
+#define GST_RERRINFO_OFFSET1           0x48
+#define GST_RERRINFO_OFFSET2           0x4c
+#define GST_RERRINFO_OFFSET3           0x50
+#define GST_RERRINFO_OFFSET4           0x54
+#define GST_RERRINFO_OFFSET5           0x58
+#define GST_RERRINFO_OFFSET6           0x5c
+#define GST_RERRINFO_OFFSET7           0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT           0x00
+#define GST_MPI_STATE_INIT             0x01
+#define GST_MPI_STATE_TERMINATION      0x02
+#define GST_MPI_STATE_ERROR            0x03
+#define GST_MPI_STATE_MASK             0x07
+
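The MPI state occupies the low bits of the first General Status Table dword (at GST_GSTLEN_MPIS_OFFSET); a one-line decode, assuming that dword has already been read from the table:

/* Returns GST_MPI_STATE_UNINIT/INIT/TERMINATION/ERROR. */
static u32 spcv_gst_mpi_state(u32 gst_len_mpistate)
{
	return gst_len_mpistate & GST_MPI_STATE_MASK;
}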
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET          0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET   0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET          0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET   0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET          0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET   0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET          0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET   0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET          0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET   0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET          0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET   0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET          0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET   0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET          0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET   0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET          0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET   0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET          0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET   0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET         0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET  0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET         0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET  0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET         0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET  0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET         0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET  0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET         0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET  0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET         0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET  0x7c /* DWORD V+31 */
+/* end PSPA */
+
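The per-phy entries above repeat on a fixed 8-byte stride from the table base ("DWORD V"); hypothetical convenience macros (not part of the header) expressing the same layout generically:

/* Phy state at 8 * phy_id, outbound HW event port id 4 bytes after it. */
#define PSPA_PHYSTATE_OFFSET(phy_id)		(0x08 * (phy_id))
#define PSPA_OB_HW_EVENT_PID_OFFSET(phy_id)	((0x08 * (phy_id)) + 0x04)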
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET            0x00
+#define IB_BASE_ADDR_HI_OFFSET         0x04
+#define IB_BASE_ADDR_LO_OFFSET         0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET      0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET      0x10
+#define IB_PIPCI_BAR                   0x14
+#define IB_PIPCI_BAR_OFFSET            0x18
+#define IB_RESERVED_OFFSET             0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET            0x00
+#define OB_BASE_ADDR_HI_OFFSET         0x04
+#define OB_BASE_ADDR_LO_OFFSET         0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET      0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET      0x10
+#define OB_CIPCI_BAR                   0x14
+#define OB_CIPCI_BAR_OFFSET            0x18
+#define OB_INTERRUPT_COALES_OFFSET     0x1C
+#define OB_DYNAMIC_COALES_OFFSET       0x20
+#define OB_PROPERTY_INT_ENABLE         0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP       0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1      0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE    0x003040
+#define PCIE_EVENT_INTERRUPT           0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE    0x003048
+#define PCIE_ERROR_INTERRUPT           0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE                0x1
+
+#define SPCv_SOFT_RESET_READ_MASK              0xC0
+#define SPCv_SOFT_RESET_NO_RESET               0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED   0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED       0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED     0xC0
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE       0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET                  0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP             0x00000001
+#define SPC_REG_RESET_RAAE             0x00000002
+#define SPC_REG_RESET_PCS_SPBC         0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS       0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS      0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS      0x00000020
+#define SPC_REG_RESET_PCS_LM           0x00000040
+#define SPC_REG_RESET_PCS              0x00000080
+#define SPC_REG_RESET_GSM              0x00000100
+#define SPC_REG_RESET_DDR2             0x00010000
+#define SPC_REG_RESET_BDMA_CORE                0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI       0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI    0x00080000
+#define SPC_REG_RESET_PCIE_PWR         0x00100000
+#define SPC_REG_RESET_PCIE_SFT         0x00200000
+#define SPC_REG_RESET_PCS_SXCBI                0x00400000
+#define SPC_REG_RESET_LMS_SXCBI                0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI       0x01000000
+#define SPC_REG_RESET_PMIC_CORE                0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI    0x04000000
+#define SPC_REG_RESET_DEVICE           0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW   0x001010
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET               0x00000000
+#define RAM_ECC_DB_ERR                 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC     0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC    0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC    0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK     0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK    0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK    0x00000048
+
+#define RB6_ACCESS_REG                 0x6A0000
+#define HDAC_EXEC_CMD                  0x0002
+#define HDA_C_PA                       0xcb
+#define HDA_SEQ_ID_BITS                        0x00ff0000
+#define HDA_GSM_OFFSET_BITS            0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS                0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS                0x42E0
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE                0x000000
+#define GSM_CONFIG_RESET_VALUE         0x00003b00
+#define GPIO_ADDR_BASE                 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET  0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET                 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST           0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS                                 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE                 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED       0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID                  0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED       0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE            0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
+
+#endif
index 317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6..23d607218ae8b8a27e07b51d3752bfae17bbba26 100644 (file)
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
 
        Firmware images can be retrieved from:
 
-               ftp://ftp.qlogic.com/outgoing/linux/firmware/
+               http://ldriver.qlogic.com/firmware/
+
+       They are also included in the linux-firmware tree.
 
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
index 729b74389f83168544fb4ef95afe2e9464193ba6..937fed8cb0388550b619dfae667d452fa1c686a7 100644 (file)
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_WRITE_DATA);
+               lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_READ_DATA);
+               lcmd_pkt->cntrl_flags = TMF_READ_DATA;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
index 5307bf86d5e08d453be2f8d3bd3ca781607b640c..ad72c1d8511162b9465b96265510bdad6dfd1e42 100644 (file)
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-void
+static void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
index 14fec976f634e1c4804855617c65aae7fcd4a566..fad71ed067ec3525a742fe251634fa429bb781f8 100644 (file)
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
        mrb->mbox_cmd = in_mbox[0];
        wmb();
 
+       ha->iocb_cnt += mrb->iocb_cnt;
        ha->isp_ops->queue_iocb(ha);
 exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
index a47f99957ba8eb9c37d6e4b46ec25cfe763c24a8..4d231c12463eb38ad29d5b3c4ca25f4af24d99fa 100644 (file)
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
        fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
        fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
        fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
-       fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
-       fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+       fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+       fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
        fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
        fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
        fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
        fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
-       fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
-       fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+       fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+       fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
        fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
        fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
        fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
  * If this is invoked as a result of a userspace call then the entry is marked
  * as nonpersistent using flash_state field.
  **/
-int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
-                                struct dev_db_entry *fw_ddb_entry,
-                                uint16_t *idx, int user)
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry,
+                                       uint16_t *idx, int user)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
                ql4_printk(KERN_ERR, ha,
                           "%s: A non-persistent entry %s found\n",
                           __func__, dev->kobj.name);
+               put_device(dev);
                goto exit_ddb_add;
        }
 
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
        int parent_type, parent_index = 0xffff;
        int rc = 0;
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev)
                return -EIO;
 
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        rc = sprintf(buf, "\n");
                break;
        case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
-               if ((fnode_sess->discovery_parent_idx) >= 0  &&
-                   (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+               if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
                        parent_index = fnode_sess->discovery_parent_idx;
 
                rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        parent_type = ISCSI_DISC_PARENT_ISNS;
                else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
-               else if (fnode_sess->discovery_parent_type >= 0  &&
-                        fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+               else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
                        parent_type = ISCSI_DISC_PARENT_SENDTGT;
                else
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                rc = -ENOSYS;
                break;
        }
+
+       put_device(dev);
        return rc;
 }
 
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
 {
        struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct dev_db_entry *fw_ddb_entry = NULL;
        struct iscsi_flashnode_param_info *fnode_param;
        struct nlattr *attr;
        int rc = QLA_ERROR;
        uint32_t rem = len;
 
-       fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
-       if (!fw_ddb_entry) {
-               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                 "%s: Unable to allocate ddb buffer\n",
-                                 __func__));
-               return -ENOMEM;
-       }
-
        nla_for_each_attr(attr, data, len, rem) {
                fnode_param = nla_data(attr);
 
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint16_t *ddb_cookie = NULL;
-       size_t ddb_size;
+       size_t ddb_size = 0;
        void *pddb = NULL;
        int target_id;
        int rc = 0;
 
-       if (!fnode_sess) {
-               rc = -EINVAL;
-               goto exit_ddb_del;
-       }
-
        if (fnode_sess->is_boot_target) {
                rc = -EPERM;
                DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
 
                dev_db_start_offset += (fnode_sess->target_id *
                                       sizeof(*fw_ddb_entry));
-               dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
-                                      (void *)fw_ddb_entry;
+               dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
 
                ddb_size = sizeof(*ddb_cookie);
        }
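The offsetof() form above computes exactly the byte offset that the removed pointer arithmetic did; a standalone sketch of the equivalence (the struct here is a stand-in, not the real dev_db_entry):

#include <assert.h>
#include <stddef.h>

struct fake_entry {
	unsigned short options;
	unsigned short cookie;
};

int main(void)
{
	struct fake_entry e;

	/* Both expressions yield the field's byte offset within the struct. */
	assert((size_t)((char *)&e.cookie - (char *)&e) ==
	       offsetof(struct fake_entry, cookie));
	return 0;
}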
index 83e0fec35d563262df3d82bbc404d0b61456353a..fe873cf7570d05abe96e71e4422c7744ee15c265 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
index 5add6f4e79281a252928519f9613cf9c92f275dc..0a537a0515ca6c6c37829fb3c45f1fe8aa20c4de 100644 (file)
@@ -1997,24 +1997,39 @@ out:
        return ret;
 }
 
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned long lba_to_map_index(sector_t lba)
+{
+       if (scsi_debug_unmap_alignment) {
+               lba += scsi_debug_unmap_granularity -
+                       scsi_debug_unmap_alignment;
+       }
+       do_div(lba, scsi_debug_unmap_granularity);
+
+       return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
 {
-       unsigned int granularity, alignment, mapped;
-       sector_t block, next, end;
+       sector_t lba = index * scsi_debug_unmap_granularity;
+
+       if (scsi_debug_unmap_alignment)
+               lba -= scsi_debug_unmap_granularity -
+                       scsi_debug_unmap_alignment;
+       return lba;
+}
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-       block = lba + alignment;
-       do_div(block, granularity);
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+       sector_t end;
+       unsigned int mapped;
+       unsigned long index;
+       unsigned long next;
 
-       mapped = test_bit(block, map_storep);
+       index = lba_to_map_index(lba);
+       mapped = test_bit(index, map_storep);
 
        if (mapped)
-               next = find_next_zero_bit(map_storep, map_size, block);
+               next = find_next_zero_bit(map_storep, map_size, index);
        else
-               next = find_next_bit(map_storep, map_size, block);
+               next = find_next_bit(map_storep, map_size, index);
 
-       end = next * granularity - scsi_debug_unmap_alignment;
+       end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
        *num = end - lba;
 
        return mapped;
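As a concrete check of the two helpers: with unmap_granularity=4 and unmap_alignment=1, provisioning block 1 covers LBAs 1-4 and begins at LBA 1. A standalone sketch of the same arithmetic (plain C; the module parameters become ordinary variables):

#include <stdio.h>

static unsigned int unmap_granularity = 4;
static unsigned int unmap_alignment = 1;

static unsigned long lba_to_map_index(unsigned long long lba)
{
	if (unmap_alignment)
		lba += unmap_granularity - unmap_alignment;
	return (unsigned long)(lba / unmap_granularity);
}

static unsigned long long map_index_to_lba(unsigned long index)
{
	unsigned long long lba = (unsigned long long)index * unmap_granularity;

	if (unmap_alignment)
		lba -= unmap_granularity - unmap_alignment;
	return lba;
}

int main(void)
{
	/* LBAs 1..4 share block 1; block 1 starts at LBA 1. */
	printf("%lu %lu %llu\n", lba_to_map_index(1), lba_to_map_index(4),
	       map_index_to_lba(1));	/* prints: 1 1 1 */
	return 0;
}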
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
 
 static void map_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (block < map_size)
-                       set_bit(block, map_storep);
+               if (index < map_size)
+                       set_bit(index, map_storep);
 
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
 static void unmap_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (rem == 0 && lba + granularity < end && block < map_size) {
-                       clear_bit(block, map_storep);
-                       if (scsi_debug_lbprz)
+               if (lba == map_index_to_lba(index) &&
+                   lba + scsi_debug_unmap_granularity <= end &&
+                   index < map_size) {
+                       clear_bit(index, map_storep);
+                       if (scsi_debug_lbprz) {
                                memset(fake_storep +
-                                      block * scsi_debug_sector_size, 0,
-                                      scsi_debug_sector_size);
+                                      lba * scsi_debug_sector_size, 0,
+                                      scsi_debug_sector_size *
+                                      scsi_debug_unmap_granularity);
+                       }
                }
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
        ret = do_device_access(SCpnt, devip, lba, num, 1);
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
        write_unlock_irqrestore(&atomic_rw, iflags);
        if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
 
-       if (unmap && scsi_debug_unmap_granularity) {
+       if (unmap && scsi_debug_lbp()) {
                unmap_region(lba, num);
                goto out;
        }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
                       fake_storep + (lba * scsi_debug_sector_size),
                       scsi_debug_sector_size);
 
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
 out:
        write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
 
        /* Logical Block Provisioning */
        if (scsi_debug_lbp()) {
-               unsigned int map_bytes;
-
                scsi_debug_unmap_max_blocks =
                        clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
 
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
                        clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
 
                if (scsi_debug_unmap_alignment &&
-                   scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+                   scsi_debug_unmap_granularity <=
+                   scsi_debug_unmap_alignment) {
                        printk(KERN_ERR
-                              "%s: ERR: unmap_granularity < unmap_alignment\n",
+                              "%s: ERR: unmap_granularity <= unmap_alignment\n",
                               __func__);
                        return -EINVAL;
                }
 
-               map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
-               map_bytes = map_size >> 3;
-               map_storep = vmalloc(map_bytes);
+               map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+               map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
                printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
                       map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
                        goto free_vm;
                }
 
-               memset(map_storep, 0x0, map_bytes);
+               bitmap_zero(map_storep, map_size);
 
                /* Map first 1KB for partition table */
                if (scsi_debug_num_parts)
index c1b05a83d403221912fb845e314d3a30656a89fc..f43de1e56420ac7916ca99c83281b81b417f4fb9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        DECLARE_COMPLETION_ONSTACK(done);
-       unsigned long timeleft;
+       unsigned long timeleft = timeout;
        struct scsi_eh_save ses;
+       const unsigned long stall_for = msecs_to_jiffies(100);
        int rtn;
 
+retry:
        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
        shost->eh_action = &done;
 
        scsi_log_send(scmd);
        scmd->scsi_done = scsi_eh_done;
-       shost->hostt->queuecommand(shost, scmd);
-
-       timeleft = wait_for_completion_timeout(&done, timeout);
+       rtn = shost->hostt->queuecommand(shost, scmd);
+       if (rtn) {
+               if (timeleft > stall_for) {
+                       scsi_eh_restore_cmnd(scmd, &ses);
+                       timeleft -= stall_for;
+                       msleep(jiffies_to_msecs(stall_for));
+                       goto retry;
+               }
+               /* signal not to enter either branch of the if () below */
+               timeleft = 0;
+               rtn = NEEDS_RETRY;
+       } else {
+               timeleft = wait_for_completion_timeout(&done, timeout);
+       }
 
        shost->eh_action = NULL;
 
-       scsi_log_completion(scmd, SUCCESS);
+       scsi_log_completion(scmd, rtn);
 
        SCSI_LOG_ERROR_RECOVERY(3,
                printk("%s: scmd: %p, timeleft: %ld\n",
                        __func__, scmd, timeleft));
 
        /*
-        * If there is time left scsi_eh_done got called, and we will
-        * examine the actual status codes to see whether the command
-        * actually did complete normally, else tell the host to forget
-        * about this command.
+        * If there is time left, scsi_eh_done got called, and we will
+        * examine the actual status codes to see whether the command actually
+        * did complete normally; else, if we have a zero return and no time
+        * left, the command must still be pending, so abort it and return
+        * FAILED.  If we never actually managed to issue the command, because
+        * ->queuecommand() kept returning non zero, use the rtn value set
+        * above (so don't execute either branch of the if).
         */
        if (timeleft) {
                rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                        rtn = FAILED;
                        break;
                }
-       } else {
+       } else if (!rtn) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
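The stall-and-retry loop above bounds how long scsi_send_eh_cmnd() keeps re-queueing by the caller's timeout. A standalone sketch of the same budget arithmetic (plain C, with the jiffies conversions replaced by milliseconds):

#include <stdio.h>

int main(void)
{
	unsigned long timeleft_ms = 10000;	/* e.g. a 10 second EH timeout */
	const unsigned long stall_for_ms = 100;	/* mirrors msecs_to_jiffies(100) */
	unsigned int retries = 0;

	/* Retry only while more than one stall interval of budget remains. */
	while (timeleft_ms > stall_for_ms) {
		timeleft_ms -= stall_for_ms;
		retries++;
	}
	printf("up to %u requeue attempts before giving up\n", retries); /* 99 */
	return 0;
}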
index 47799a33d6caacf7e5715e9c7760d121c24797b1..133926b1bb78bc39231260b5359dff3ff9a16bcb 100644 (file)
@@ -1019,8 +1019,7 @@ exit_match_index:
 /**
  * iscsi_get_flashnode_by_index -finds flashnode session entry by index
  * @shost: pointer to host data
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
+ * @idx: index to match
  *
  * Finds the flashnode session object for the passed index
  *
@@ -1029,13 +1028,13 @@ exit_match_index:
  *  %NULL on failure
  */
 static struct iscsi_bus_flash_session *
-iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
-                            int (*fn)(struct device *dev, void *data))
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct device *dev;
 
-       dev = device_find_child(&shost->shost_gendev, data, fn);
+       dev = device_find_child(&shost->shost_gendev, &idx,
+                               flashnode_match_index);
        if (dev)
                fnode_sess = iscsi_dev_to_flash_session(dev);
 
@@ -1059,18 +1058,13 @@ struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data))
 {
-       struct device *dev;
-
-       dev = device_find_child(&shost->shost_gendev, data, fn);
-       return dev;
+       return device_find_child(&shost->shost_gendev, data, fn);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 
 /**
  * iscsi_find_flashnode_conn - finds flashnode connection entry
  * @fnode_sess: pointer to parent flashnode session entry
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
  *
  * Finds the flashnode connection object comparing the data passed using logic
  * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
  *  %NULL on failure
  */
 struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
 {
-       struct device *dev;
-
-       dev = device_find_child(&fnode_sess->dev, data, fn);
-       return dev;
+       return device_find_child(&fnode_sess->dev, NULL,
+                                iscsi_is_flashnode_conn_dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
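With the data/fn parameters gone, iscsi_find_flashnode_conn() simply returns the child connection device with a reference held. A hedged sketch of the expected caller pattern (the function below is illustrative, not part of the transport class):

/* Look up the flash connection under a flash session, use it, then drop the
 * reference taken by device_find_child(). */
static int example_touch_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
{
	struct iscsi_bus_flash_conn *fnode_conn;
	struct device *dev;

	dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!dev)
		return -ENODEV;

	fnode_conn = iscsi_dev_to_flash_conn(dev);
	/* ... read or modify fnode_conn fields here ... */
	put_device(dev);
	return 0;
}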
 
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.set_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.set_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.set_flashnode.host_no);
+                      __func__, idx, ev->u.set_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
 {
        struct Scsi_Host *shost;
        struct iscsi_bus_flash_session *fnode_sess;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.del_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.del_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.del_flashnode.host_no);
+                      __func__, idx, ev->u.del_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
        err = transport->del_flashnode(fnode_sess);
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.login_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.login_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.login_flashnode.host_no);
+                      __func__, idx, ev->u.login_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->login_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.logout_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.logout_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.logout_flashnode.host_no);
+                      __func__, idx, ev->u.logout_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
 
        err = transport->logout_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
        }
 
        iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
-       if (!iscsi_eh_timer_workq)
+       if (!iscsi_eh_timer_workq) {
+               err = -ENOMEM;
                goto release_nls;
+       }
 
        return 0;
 
index e6689776b4f617ac55b1e9c4dac138cee85a9196..196240dcf26e4c7c2192f78682a40b750e3fd51f 100644 (file)
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
        char *buffer_data;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       static const char temp[] = "temporary ";
        int len;
 
        if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                 * it's not worth the risk */
                return -EINVAL;
 
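+       /*
+        * A leading "temporary " prefix means the change applies to the
+        * running kernel only and is not written back to the device's
+        * caching mode page (see the cache_override handling below).
+        */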
+       if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+               buf += sizeof(temp) - 1;
+               sdkp->cache_override = 1;
+       } else {
+               sdkp->cache_override = 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
                len = strlen(sd_cache_types[i]);
                if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
        rcd = ct & 0x01 ? 1 : 0;
        wce = ct & 0x02 ? 1 : 0;
+
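+       /*
+        * Override mode: update only the kernel's cached WCE/RCD flags and
+        * return before the scsi_mode_sense() call below, so the device's
+        * mode page is left untouched.
+        */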
+       if (sdkp->cache_override) {
+               sdkp->WCE = wce;
+               sdkp->RCD = rcd;
+               return count;
+       }
+
        if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
                            SD_MAX_RETRIES, &data, NULL))
                return -EINVAL;
@@ -2318,6 +2333,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
        int old_rcd = sdkp->RCD;
        int old_dpofua = sdkp->DPOFUA;
 
+
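+       /* don't let the mode page read from the device clobber a user-forced
+        * ("temporary") cache setting */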
+       if (sdkp->cache_override)
+               return;
+
        first_len = 4;
        if (sdp->skip_ms_page_8) {
                if (sdp->type == TYPE_RBC)
@@ -2811,6 +2830,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        sdkp->capacity = 0;
        sdkp->media_present = 1;
        sdkp->write_prot = 0;
+       sdkp->cache_override = 0;
        sdkp->WCE = 0;
        sdkp->RCD = 0;
        sdkp->ATO = 0;
index 74a1e4ca5401f9a58e6e705bb2fc8a99fcd04a0f..2386aeb41fe8d74826908d3dca71eb3f97a59ac8 100644 (file)
@@ -73,6 +73,7 @@ struct scsi_disk {
        u8              protection_type;/* Data Integrity Field */
        u8              provisioning_mode;
        unsigned        ATO : 1;        /* state of disk ATO bit */
+       unsigned        cache_override : 1; /* temp override of WCE,RCD */
        unsigned        WCE : 1;        /* state of disk WCE bit */
        unsigned        RCD : 1;        /* state of disk RCD bit, unused */
        unsigned        DPOFUA : 1;     /* state of disk DPOFUA bit */
index 04998f36e5071bdda94ff225f39b0ca3616eeb7b..6174ca4ea27594487d7dc0828d9e21841742b8ed 100644 (file)
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
                if (sdt->app_tag == 0xffff)
                        return 0;
 
-               /* Bad ref tag received from disk */
-               if (sdt->ref_tag == 0xffffffff) {
-                       printk(KERN_ERR
-                              "%s: bad phys ref tag on sector %lu\n",
-                              bix->disk_name, (unsigned long)sector);
-                       return -EIO;
-               }
-
                if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
                        printk(KERN_ERR
                               "%s: ref tag error on sector %lu (rcvd %u)\n",
index 0371047c59222251971d823219e3639ac2a53ff6..35faf24c604401eeb560472542cd2ed350289d10 100644 (file)
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
          If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+       tristate "Platform bus based UFS Controller support"
+       depends on SCSI_UFSHCD
+       ---help---
+         This selects the UFS host controller support. Select this if
+         you have a UFS controller on the platform bus.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
index 9eda0dfbd6df383304de10a0805868ee6485929c..1e5bd48457d636ef0aafd096f8a8038ac0dcbd5e 100644 (file)
@@ -1,3 +1,4 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644 (file)
index 0000000..03319ac
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ *
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       disable_irq(hba->irq);
+
+       return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       enable_irq(hba->irq);
+
+       return 0;
+}
+#else
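+/* with CONFIG_PM disabled these stay NULL; the PM core skips NULL callbacks */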
+#define ufshcd_pltfrm_suspend  NULL
+#define ufshcd_pltfrm_resume   NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       struct resource *mem_res;
+       struct resource *irq_res;
+       resource_size_t mem_size;
+       int err;
+       struct device *dev = &pdev->dev;
+
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res) {
+               dev_err(&pdev->dev,
+                       "Memory resource not available\n");
+               err = -ENODEV;
+               goto out_error;
+       }
+
+       mem_size = resource_size(mem_res);
+       if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+               dev_err(&pdev->dev,
+                       "Cannot reserve the memory resource\n");
+               err = -EBUSY;
+               goto out_error;
+       }
+
+       mmio_base = ioremap_nocache(mem_res->start, mem_size);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq_res) {
+               dev_err(&pdev->dev, "IRQ resource not available\n");
+               err = -ENODEV;
+               goto out_iounmap;
+       }
+
+       err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+       if (err) {
+               dev_err(&pdev->dev, "Intialization failed\n");
+               goto out_iounmap;
+       }
+
+       platform_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       release_mem_region(mem_res->start, mem_size);
+out_error:
+       return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+       struct resource *mem_res;
+       resource_size_t mem_size;
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       disable_irq(hba->irq);
+
+       /*
+        * Some buggy controllers can raise an interrupt after their
+        * resources have been released, so unregister the IRQ handler
+        * first and only then release the resources used by the driver.
+        */
+
+       free_irq(hba->irq, hba);
+       ufshcd_remove(hba);
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res)
+               dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+       else {
+               mem_size = resource_size(mem_res);
+               release_mem_region(mem_res->start, mem_size);
+       }
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+       { .compatible = "jedec,ufs-1.1"},
+       { }, /* sentinel - an of_match_table must be zero-terminated */
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+       .suspend        = ufshcd_pltfrm_suspend,
+       .resume         = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+       .probe  = ufshcd_pltfrm_probe,
+       .remove = ufshcd_pltfrm_remove,
+       .driver = {
+               .name   = "ufshcd",
+               .owner  = THIS_MODULE,
+               .pm     = &ufshcd_dev_pm_ops,
+               .of_match_table = ufs_of_match,
+       },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 60fd40c4e4c2ada48a268a997099b18f2fa4ef34..c32a478df81b83c75cdbbdac9206236021f9f863 100644 (file)
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
                ucd_cmd_ptr->header.dword_2 = 0;
 
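+               /*
+                * Use the full length of the data buffer here;
+                * cmd->transfersize is typically only the per-block
+                * transfer size for READ/WRITE commands.
+                */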
                ucd_cmd_ptr->exp_data_transfer_len =
-                       cpu_to_be32(lrbp->cmd->transfersize);
+                       cpu_to_be32(lrbp->cmd->sdb.length);
 
                memcpy(ucd_cmd_ptr->cdb,
                       lrbp->cmd->cmnd,
index 2b85c521f7375452ed4c0fd54d7cba37b60f4dde..c12916248469f577795d28e96c4f86be747fbf5b 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5705M_2   0x165e
 #define PCI_DEVICE_ID_NX2_57712                0x1662
 #define PCI_DEVICE_ID_NX2_57712E       0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF     0x1663
 #define PCI_DEVICE_ID_TIGON3_5714      0x1668
 #define PCI_DEVICE_ID_TIGON3_5714S     0x1669
 #define PCI_DEVICE_ID_TIGON3_5780      0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S     0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F     0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF     0x166f
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
 #define PCI_DEVICE_ID_TIGON3_5787      0x169b
 #define PCI_DEVICE_ID_TIGON3_5788      0x169c
 #define PCI_DEVICE_ID_TIGON3_5789      0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10   0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20   0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_57800_MF     0x16a5
 #define PCI_DEVICE_ID_TIGON3_5702X     0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X     0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index ef937b56f9b54c44e4003d762eaa7105534077da..e2c1e66d58ae6c810e4ca18d4e62d9f78b2c3e4b 100644 (file)
@@ -118,7 +118,7 @@ struct ex_phy {
 
        enum ex_phy_state phy_state;
 
-       enum sas_dev_type attached_dev_type;
+       enum sas_device_type attached_dev_type;
        enum sas_linkrate linkrate;
 
        u8   attached_sata_host:1;
@@ -195,7 +195,7 @@ enum {
 
 struct domain_device {
        spinlock_t done_lock;
-        enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
 
         enum sas_linkrate linkrate;
         enum sas_linkrate min_linkrate;
index a6026da25f3e0bf5787d479e1511da85c1175299..25ac6283b9c753a6af74055d590a1ed613004be1 100644 (file)
@@ -107,7 +107,7 @@ enum osd_attributes_mode {
  *             int exponent: 04;
  *     }
  */
-typedef __be32 __bitwise osd_cdb_offset;
+typedef __be32 osd_cdb_offset;
 
 enum {
        OSD_OFFSET_UNUSED = 0xFFFFFFFF,
index be3eb0bf1ac0ef1213343a173787c645d16e21ef..0d2607d1238753f951e43e6b278735971413423c 100644 (file)
@@ -90,16 +90,18 @@ enum sas_oob_mode {
 };
 
 /* See sas_discover.c if you plan on changing these */
-enum sas_dev_type {
-       NO_DEVICE   = 0,          /* protocol */
-       SAS_END_DEV = 1,          /* protocol */
-       EDGE_DEV    = 2,          /* protocol */
-       FANOUT_DEV  = 3,          /* protocol */
-       SAS_HA      = 4,
-       SATA_DEV    = 5,
-       SATA_PM     = 7,
-       SATA_PM_PORT= 8,
-       SATA_PENDING  = 9,
+enum sas_device_type {
+       /* these are SAS protocol defined (attached device type field) */
+       SAS_PHY_UNUSED = 0,
+       SAS_END_DEVICE = 1,
+       SAS_EDGE_EXPANDER_DEVICE = 2,
+       SAS_FANOUT_EXPANDER_DEVICE = 3,
+       /* these are internal to libsas */
+       SAS_HA = 4,
+       SAS_SATA_DEV = 5,
+       SAS_SATA_PM = 7,
+       SAS_SATA_PM_PORT = 8,
+       SAS_SATA_PENDING = 9,
 };
 
 enum sas_protocol {
index ff71a56546845f2e29266ecf466ecd6d7174f38a..00f41aeeecf54f321b74599b99dc5f97a5af0bca 100644 (file)
@@ -32,8 +32,8 @@
 
 static inline int dev_is_sata(struct domain_device *dev)
 {
-       return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-              dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
+       return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+              dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
index 4a58cca2ecc182f1d44912297cc294b3ecce4760..d0f1602985e7efe6d6ab40dd67fbcc04e6a32276 100644 (file)
@@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
 extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
 extern int iscsi_flashnode_bus_match(struct device *dev,
                                     struct device_driver *drv);
-extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
-
 extern struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data));
-
 extern struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
+
 #endif
index 9b8e08879cfc9d7f7038245cf12e9b162f0f7f4e..0bd71e2702e3181e115ba8aa8e2b49a78ac21da7 100644 (file)
@@ -10,13 +10,6 @@ struct scsi_transport_template;
 struct sas_rphy;
 struct request;
 
-enum sas_device_type {
-       SAS_PHY_UNUSED = 0,
-       SAS_END_DEVICE = 1,
-       SAS_EDGE_EXPANDER_DEVICE = 2,
-       SAS_FANOUT_EXPANDER_DEVICE = 3,
-};
-
 static inline int sas_protocol_ata(enum sas_protocol proto)
 {
        return ((proto & SAS_PROTOCOL_SATA) ||