[SCSI] lpfc 8.2.2 : Attribute and Parameter splits for vport and physical port
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c3e68e0d8f7445e426fa0ee28cc35ab5903c37cc..78666c62330e0e805b77e704f4325a125ae1e401 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
+#include "lpfc_vport.h"
 
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
+/*
+ * This function is called with no lock held when there is a resource
+ * error in the driver or in the firmware.
+ */
+void
+lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&phba->hbalock, flags);
+       atomic_inc(&phba->num_rsrc_err);
+       phba->last_rsrc_error_time = jiffies;
+
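+       /* Schedule at most one ramp-down per QUEUE_RAMP_DOWN_INTERVAL */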
+       if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               return;
+       }
+
+       phba->last_ramp_down_time = jiffies;
+
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
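+       /* Flag the worker thread to ramp the queue depths down and wake it */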
+       spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+       if ((phba->pport->work_port_events &
+               WORKER_RAMP_DOWN_QUEUE) == 0) {
+               phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+       }
+       spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+       spin_lock_irqsave(&phba->hbalock, flags);
+       if (phba->work_wait)
+               wake_up(phba->work_wait);
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
+       return;
+}
+
+/*
+ * This function is called with no lock held when there is a successful
+ * SCSI command completion.
+ */
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
+                       struct scsi_device *sdev)
+{
+       unsigned long flags;
+       struct lpfc_hba *phba = vport->phba;
+       atomic_inc(&phba->num_cmd_success);
+
+       if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
+               return;
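+       /*
+        * Ramp up at most once per QUEUE_RAMP_UP_INTERVAL, and not while
+        * still inside the interval following the last resource error.
+        */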
+       spin_lock_irqsave(&phba->hbalock, flags);
+       if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
+        ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               return;
+       }
+       phba->last_ramp_up_time = jiffies;
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
+       spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+       if ((phba->pport->work_port_events &
+               WORKER_RAMP_UP_QUEUE) == 0) {
+               phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+       }
+       spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+       spin_lock_irqsave(&phba->hbalock, flags);
+       if (phba->work_wait)
+               wake_up(phba->work_wait);
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       struct Scsi_Host  *shost;
+       struct scsi_device *sdev;
+       unsigned long new_queue_depth;
+       unsigned long num_rsrc_err, num_cmd_success;
+       int i;
+
+       num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+       num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       shost_for_each_device(sdev, shost) {
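+                               /*
+                                * Reduce the depth in proportion to the
+                                * share of recent commands that failed for
+                                * lack of resources; always drop by at
+                                * least one.
+                                */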
+                               new_queue_depth =
+                                       sdev->queue_depth * num_rsrc_err /
+                                       (num_rsrc_err + num_cmd_success);
+                               if (!new_queue_depth)
+                                       new_queue_depth = sdev->queue_depth - 1;
+                               else
+                                       new_queue_depth = sdev->queue_depth -
+                                                               new_queue_depth;
+                               if (sdev->ordered_tags)
+                                       scsi_adjust_queue_depth(sdev,
+                                                       MSG_ORDERED_TAG,
+                                                       new_queue_depth);
+                               else
+                                       scsi_adjust_queue_depth(sdev,
+                                                       MSG_SIMPLE_TAG,
+                                                       new_queue_depth);
+                       }
+               }
+       lpfc_destroy_vport_work_array(vports);
+       atomic_set(&phba->num_rsrc_err, 0);
+       atomic_set(&phba->num_cmd_success, 0);
+}
+
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       struct Scsi_Host  *shost;
+       struct scsi_device *sdev;
+       int i;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       shost_for_each_device(sdev, shost) {
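+                               /* Bump each LUN's queue depth by one */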
+                               if (sdev->ordered_tags)
+                                       scsi_adjust_queue_depth(sdev,
+                                                       MSG_ORDERED_TAG,
+                                                       sdev->queue_depth+1);
+                               else
+                                       scsi_adjust_queue_depth(sdev,
+                                                       MSG_SIMPLE_TAG,
+                                                       sdev->queue_depth+1);
+                       }
+               }
+       lpfc_destroy_vport_work_array(vports);
+       atomic_set(&phba->num_rsrc_err, 0);
+       atomic_set(&phba->num_cmd_success, 0);
+}
 
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
  * and the BPL BDE is setup in the IOCB.
  */
 static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_hba * phba)
+lpfc_new_scsi_buf(struct lpfc_vport *vport)
 {
+       struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
@@ -63,7 +207,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));
-       psb->scsi_hba = phba;
 
        /*
         * Get memory from the pci pool to map the virt space to pci bus space
@@ -146,12 +289,16 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 
        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+       if (lpfc_cmd) {
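+               /* Clear state left over from the buffer's previous command */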
+               lpfc_cmd->seg_cnt = 0;
+               lpfc_cmd->nonsg_phys = 0;
+       }
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return  lpfc_cmd;
 }
 
 static void
-lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
@@ -162,7 +309,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 }
 
 static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
@@ -171,8 +318,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
-       int datadir = scsi_cmnd->sc_data_direction;
-       int dma_error;
+       int nseg, datadir = scsi_cmnd->sc_data_direction;
 
        /*
         * There are three possibilities here - use scatter-gather segment, use
@@ -181,26 +327,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
         * data bde entry.
         */
        bpl += 2;
-       if (scsi_cmnd->use_sg) {
+       if (scsi_sg_count(scsi_cmnd)) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this a count of dma-mappings used to map the use_sg
                 * pages.  They are not guaranteed to be the same for those
                 * architectures that implement an IOMMU.
                 */
-               sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
-               lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
-                                               scsi_cmnd->use_sg, datadir);
-               if (lpfc_cmd->seg_cnt == 0)
+
+               nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+                                 scsi_sg_count(scsi_cmnd), datadir);
+               if (unlikely(!nseg))
                        return 1;
 
+               lpfc_cmd->seg_cnt = nseg;
                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg.  Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
-                       dma_unmap_sg(&phba->pcidev->dev, sgel,
-                                    lpfc_cmd->seg_cnt, datadir);
+                       scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
 
@@ -210,7 +356,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
                 * single scsi command.  Just run through the seg_cnt and format
                 * the bde's.
                 */
-               for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+               scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -221,34 +367,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
-                       sgel++;
                        num_bde++;
                }
-       } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
-               physaddr = dma_map_single(&phba->pcidev->dev,
-                                         scsi_cmnd->request_buffer,
-                                         scsi_cmnd->request_bufflen,
-                                         datadir);
-               dma_error = dma_mapping_error(physaddr);
-               if (dma_error) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                               "%d:0718 Unable to dma_map_single "
-                               "request_buffer: x%x\n",
-                               phba->brd_no, dma_error);
-                       return 1;
-               }
-
-               lpfc_cmd->nonsg_phys = physaddr;
-               bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
-               bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
-               bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
-               if (datadir == DMA_TO_DEVICE)
-                       bpl->tus.f.bdeFlags = 0;
-               else
-                       bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-               bpl->tus.w = le32_to_cpu(bpl->tus.w);
-               num_bde = 1;
-               bpl++;
        }
 
        /*
@@ -262,7 +382,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
-       fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+       fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
        return 0;
 }
 
@@ -275,26 +395,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
-       if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
-               dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
-                               psb->seg_cnt, psb->pCmd->sc_data_direction);
-       } else {
-                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
-                       dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
-                                               psb->pCmd->request_bufflen,
-                                               psb->pCmd->sc_data_direction);
-                }
-       }
+       if (psb->seg_cnt > 0)
+               scsi_dma_unmap(psb->pCmd);
 }
 
 static void
-lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+                   struct lpfc_iocbq *rsp_iocb)
 {
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
-       struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
-       uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
+       struct lpfc_hba *phba = vport->phba;
+       uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+       uint32_t vpi = vport->vpi;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t *lp;
@@ -327,9 +441,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
                logit = LOG_FCP;
 
        lpfc_printf_log(phba, KERN_WARNING, logit,
-                       "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+                       "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
                        "Data: x%x x%x x%x x%x x%x\n",
-                       phba->brd_no, cmnd->cmnd[0], scsi_status,
+                       phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
                        be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
@@ -345,16 +459,36 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
                }
        }
 
-       cmnd->resid = 0;
+       scsi_set_resid(cmnd, 0);
        if (resp_info & RESID_UNDER) {
-               cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+               scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
 
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                               "%d:0716 FCP Read Underrun, expected %d, "
-                               "residual %d Data: x%x x%x x%x\n", phba->brd_no,
-                               be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
-                               fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+                               "%d (%d):0716 FCP Read Underrun, expected %d, "
+                               "residual %d Data: x%x x%x x%x\n",
+                               phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
+                               scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+                               cmnd->underflow);
 
+               /*
+                * If there is an under run check if under run reported by
+                * storage array is same as the under run reported by HBA.
+                * If this is not same, there is a dropped frame.
+                */
+               if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+                       fcpi_parm &&
+                       (scsi_get_resid(cmnd) != fcpi_parm)) {
+                       lpfc_printf_log(phba, KERN_WARNING,
+                                       LOG_FCP | LOG_FCP_ERROR,
+                                       "%d (%d):0735 FCP Read Check Error "
+                                       "and Underrun Data: x%x x%x x%x x%x\n",
+                                       phba->brd_no, vpi,
+                                       be32_to_cpu(fcpcmd->fcpDl),
+                                       scsi_get_resid(cmnd), fcpi_parm,
+                                       cmnd->cmnd[0]);
+                       scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+                       host_status = DID_ERROR;
+               }
                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transfered for this command.  Provided a sense condition
@@ -363,22 +497,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
-                   (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+                   (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+                    < cmnd->underflow)) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                                       "%d:0717 FCP command x%x residual "
+                                       "%d (%d):0717 FCP command x%x residual "
                                        "underrun converted to error "
-                                       "Data: x%x x%x x%x\n", phba->brd_no,
-                                       cmnd->cmnd[0], cmnd->request_bufflen,
-                                       cmnd->resid, cmnd->underflow);
-
+                                       "Data: x%x x%x x%x\n",
+                                       phba->brd_no, vpi, cmnd->cmnd[0],
+                                       scsi_bufflen(cmnd),
+                                       scsi_get_resid(cmnd), cmnd->underflow);
                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                               "%d:0720 FCP command x%x residual "
+                               "%d (%d):0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x \n",
-                               phba->brd_no, cmnd->cmnd[0],
-                               cmnd->request_bufflen, cmnd->resid);
+                               phba->brd_no, vpi, cmnd->cmnd[0],
+                               scsi_bufflen(cmnd), scsi_get_resid(cmnd));
                host_status = DID_ERROR;
 
        /*
@@ -388,13 +523,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                        (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-                       "%d:0734 FCP Read Check Error Data: "
-                       "x%x x%x x%x x%x\n", phba->brd_no,
-                       be32_to_cpu(fcpcmd->fcpDl),
-                       be32_to_cpu(fcprsp->rspResId),
-                       fcpi_parm, cmnd->cmnd[0]);
+                               "%d (%d):0734 FCP Read Check Error Data: "
+                               "x%x x%x x%x x%x\n",
+                               phba->brd_no, vpi,
+                               be32_to_cpu(fcpcmd->fcpDl),
+                               be32_to_cpu(fcprsp->rspResId),
+                               fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
-               cmnd->resid = cmnd->request_bufflen;
+               scsi_set_resid(cmnd, scsi_bufflen(cmnd));
        }
 
  out:
@@ -407,9 +543,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 {
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
+       struct lpfc_vport      *vport = pIocbIn->vport;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+                       ? lpfc_cmd->cur_iocbq.vport->vpi
+                       : 0);
        int result;
        struct scsi_device *sdev, *tmp_sdev;
        int depth = 0;
@@ -425,22 +565,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        lpfc_cmd->status = IOSTAT_DEFAULT;
 
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                               "%d:0729 FCP cmd x%x failed <%d/%d> status: "
-                               "x%x result: x%x Data: x%x x%x\n",
-                               phba->brd_no, cmd->cmnd[0], cmd->device->id,
-                               cmd->device->lun, lpfc_cmd->status,
-                               lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+                               "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
+                               "status: x%x result: x%x Data: x%x x%x\n",
+                               phba->brd_no, vpi, cmd->cmnd[0],
+                               cmd->device ? cmd->device->id : 0xffff,
+                               cmd->device ? cmd->device->lun : 0xffff,
+                               lpfc_cmd->status, lpfc_cmd->result,
+                               pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
 
                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
-                       lpfc_handle_fcp_err(lpfc_cmd);
+                       lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
+               case IOSTAT_LOCAL_REJECT:
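+                       /*
+                        * Return DID_REQUEUE for these errors so the
+                        * midlayer requeues and retries the command.
+                        */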
+                       if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+                           lpfc_cmd->result == IOERR_NO_RESOURCES ||
+                           lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+                               cmd->result = ScsiResult(DID_REQUEUE, 0);
+                               break;
+                       } /* else: fall through */
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
@@ -457,31 +606,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;
 
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                               "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
-                               "SNS x%x x%x Data: x%x x%x\n",
-                               phba->brd_no, cmd->device->id,
+                               "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
+                               "x%x SNS x%x x%x Data: x%x x%x\n",
+                               phba->brd_no, vpi, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
-                               *lp, *(lp + 3), cmd->retries, cmd->resid);
+                               *lp, *(lp + 3), cmd->retries,
+                               scsi_get_resid(cmd));
        }
 
        result = cmd->result;
        sdev = cmd->device;
+       lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        cmd->scsi_done(cmd);
 
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-               lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return;
        }
 
+
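+       /* On success, let the worker consider ramping queue depths back up */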
+       if (!result)
+               lpfc_rampup_queue_depth(vport, sdev);
+
        if (!result && pnode != NULL &&
           ((jiffies - pnode->last_ramp_up_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
           ((jiffies - pnode->last_q_full_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-          (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
+          (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
                shost_for_each_device(tmp_sdev, sdev->host) {
-                       if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
+                       if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
                                if (tmp_sdev->id != sdev->id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
@@ -512,7 +666,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                                        tmp_sdev->queue_depth - 1);
                }
                /*
-                * The queue depth cannot be lowered any more.
+                * The queue depth cannot be lowered any more.
                 * Modify the returned error code to store
                 * the final depth value set by
                 * scsi_track_queue_full.
@@ -522,19 +676,20 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
                if (depth) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                               "%d:0711 detected queue full - lun queue depth "
-                               " adjusted to %d.\n", phba->brd_no, depth);
+                                       "%d (%d):0711 detected queue full - "
+                                       "lun queue depth adjusted to %d.\n",
+                                       phba->brd_no, vpi, depth);
                }
        }
 
-       lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
 
 static void
-lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
-                       struct lpfc_nodelist *pnode)
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+                   struct lpfc_nodelist *pnode)
 {
+       struct lpfc_hba *phba = vport->phba;
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -571,22 +726,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
-       if (scsi_cmnd->use_sg) {
-               if (datadir == DMA_TO_DEVICE) {
-                       iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-                       iocb_cmd->un.fcpi.fcpi_parm = 0;
-                       iocb_cmd->ulpPU = 0;
-                       fcp_cmnd->fcpCntl3 = WRITE_DATA;
-                       phba->fc4OutputRequests++;
-               } else {
-                       iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
-                       iocb_cmd->ulpPU = PARM_READ_CHECK;
-                       iocb_cmd->un.fcpi.fcpi_parm =
-                               scsi_cmnd->request_bufflen;
-                       fcp_cmnd->fcpCntl3 = READ_DATA;
-                       phba->fc4InputRequests++;
-               }
-       } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
+       if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -596,8 +736,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
-                       iocb_cmd->un.fcpi.fcpi_parm =
-                               scsi_cmnd->request_bufflen;
+                       iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
@@ -621,15 +760,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
        piocbq->context1  = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+       piocbq->vport = vport;
 }
 
 static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             unsigned int lun,
                             uint8_t task_mgmt_cmd)
 {
-       struct lpfc_sli *psli;
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
@@ -640,8 +779,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                return 0;
        }
 
-       psli = &phba->sli;
        piocbq = &(lpfc_cmd->cur_iocbq);
+       piocbq->vport = vport;
+
        piocb = &piocbq->iocb;
 
        fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -667,14 +807,27 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }
 
-       return (1);
+       return 1;
+}
+
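+/*
+ * Deferred completion used when a task management IOCB times out: simply
+ * release the associated scsi buffer when the IOCB finally completes.
+ */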
+static void
+lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
+                       struct lpfc_iocbq *cmdiocbq,
+                       struct lpfc_iocbq *rspiocbq)
+{
+       struct lpfc_scsi_buf *lpfc_cmd =
+               (struct lpfc_scsi_buf *) cmdiocbq->context1;
+       if (lpfc_cmd)
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
+       return;
 }
 
 static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
                    unsigned  tgt_id, unsigned int lun,
                    struct lpfc_rport_data *rdata)
 {
+       struct lpfc_hba   *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;
@@ -683,12 +836,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
                return FAILED;
 
        lpfc_cmd->rdata = rdata;
-       ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+       ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
                                           FCP_TARGET_RESET);
        if (!ret)
                return FAILED;
 
-       lpfc_cmd->scsi_hba = phba;
        iocbq = &lpfc_cmd->cur_iocbq;
        iocbqrsp = lpfc_sli_get_iocbq(phba);
 
@@ -697,17 +849,18 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
 
        /* Issue Target Reset to TGT <num> */
        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                       "%d:0702 Issue Target Reset to TGT %d "
+                       "%d (%d):0702 Issue Target Reset to TGT %d "
                        "Data: x%x x%x\n",
-                       phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
-                       rdata->pnode->nlp_flag);
+                       phba->brd_no, vport->vpi, tgt_id,
+                       rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
 
        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
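+               /* On timeout the deferred completion will release the buffer */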
+               if (ret == IOCB_TIMEDOUT)
+                       iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-               ret = FAILED;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
@@ -724,7 +877,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
 const char *
 lpfc_info(struct Scsi_Host *host)
 {
-       struct lpfc_hba    *phba = (struct lpfc_hba *) host->hostdata;
+       struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
        int len;
        static char  lpfcinfobuf[384];
 
@@ -766,26 +920,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
 
 void lpfc_poll_timeout(unsigned long ptr)
 {
-       struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
-       unsigned long iflag;
-
-       spin_lock_irqsave(phba->host->host_lock, iflag);
+       struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring (phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }
-
-       spin_unlock_irqrestore(phba->host->host_lock, iflag);
 }
 
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
-       struct lpfc_hba *phba =
-               (struct lpfc_hba *) cmnd->device->host->hostdata;
-       struct lpfc_sli *psli = &phba->sli;
+       struct Scsi_Host  *shost = cmnd->device->host;
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_sli   *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
@@ -806,11 +956,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
                goto out_fail_command;
        }
-       lpfc_cmd = lpfc_get_scsi_buf (phba);
+       lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL) {
+               lpfc_adjust_queue_depth(phba);
+
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                               "%d:0707 driver's buffer pool is empty, "
-                               "IO busied\n", phba->brd_no);
+                               "%d (%d):0707 driver's buffer pool is empty, "
+                               "IO busied\n",
+                               phba->brd_no, vport->vpi);
                goto out_host_busy;
        }
 
@@ -828,10 +981,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        if (err)
                goto out_host_busy_free_buf;
 
-       lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+       lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
-                               &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+                                 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;
 
@@ -873,8 +1026,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
 static int
 lpfc_abort_handler(struct scsi_cmnd *cmnd)
 {
-       struct Scsi_Host *shost = cmnd->device->host;
-       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+       struct Scsi_Host  *shost = cmnd->device->host;
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
@@ -884,8 +1038,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        int ret = SUCCESS;
 
        lpfc_block_error_handler(cmnd);
-       spin_lock_irq(shost->host_lock);
-
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        BUG_ON(!lpfc_cmd);
 
@@ -922,12 +1074,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
-       if (phba->hba_state >= LPFC_LINK_UP)
+       if (lpfc_is_link_up(phba))
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;
 
        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+       abtsiocb->vport = vport;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
@@ -943,92 +1096,88 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_sli_poll_fcp_ring (phba);
 
-               spin_unlock_irq(phba->host->host_lock);
-                       schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
-               spin_lock_irq(phba->host->host_lock);
+               schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
                if (++loop_count
-                   > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
+                   > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
                        break;
        }
 
        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                               "%d:0748 abort handler timed out waiting for "
-                               "abort to complete: ret %#x, ID %d, LUN %d, "
-                               "snum %#lx\n",
-                               phba->brd_no,  ret, cmnd->device->id,
-                               cmnd->device->lun, cmnd->serial_number);
+                               "%d (%d):0748 abort handler timed out waiting "
+                               "for abort to complete: ret %#x, ID %d, "
+                               "LUN %d, snum %#lx\n",
+                               phba->brd_no, vport->vpi, ret,
+                               cmnd->device->id, cmnd->device->lun,
+                               cmnd->serial_number);
        }
 
  out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                       "%d:0749 SCSI Layer I/O Abort Request "
+                       "%d (%d):0749 SCSI Layer I/O Abort Request "
                        "Status x%x ID %d LUN %d snum %#lx\n",
-                       phba->brd_no, ret, cmnd->device->id,
+                       phba->brd_no, vport->vpi, ret, cmnd->device->id,
                        cmnd->device->lun, cmnd->serial_number);
 
-       spin_unlock_irq(shost->host_lock);
-
        return ret;
 }
 
 static int
-lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
+lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 {
-       struct Scsi_Host *shost = cmnd->device->host;
-       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+       struct Scsi_Host  *shost = cmnd->device->host;
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        uint32_t cmd_result = 0, cmd_status = 0;
        int ret = FAILED;
+       int iocb_status = IOCB_SUCCESS;
        int cnt, loopcnt;
 
        lpfc_block_error_handler(cmnd);
-       spin_lock_irq(shost->host_lock);
        loopcnt = 0;
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
-       while ( 1 ) {
+       while (1) {
                if (!pnode)
-                       return FAILED;
+                       goto out;
 
                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-                       spin_unlock_irq(phba->host->host_lock);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-                       spin_lock_irq(phba->host->host_lock);
                        loopcnt++;
                        rdata = cmnd->device->hostdata;
                        if (!rdata ||
-                               (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
+                               (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
                                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                                       "%d:0721 LUN Reset rport failure:"
-                                       " cnt x%x rdata x%p\n",
-                                       phba->brd_no, loopcnt, rdata);
+                                               "%d (%d):0721 LUN Reset rport "
+                                               "failure: cnt x%x rdata x%p\n",
+                                               phba->brd_no, vport->vpi,
+                                               loopcnt, rdata);
                                goto out;
                        }
                        pnode = rdata->pnode;
                        if (!pnode)
-                               return FAILED;
+                               goto out;
                }
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
        }
 
-       lpfc_cmd = lpfc_get_scsi_buf (phba);
+       lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;
 
        lpfc_cmd->timeout = 60;
-       lpfc_cmd->scsi_hba = phba;
        lpfc_cmd->rdata = rdata;
 
-       ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
-                                          FCP_LUN_RESET);
+       ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
+                                          FCP_TARGET_RESET);
        if (!ret)
                goto out_free_scsi_buf;
 
@@ -1040,16 +1189,22 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
                goto out_free_scsi_buf;
 
        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-                       "%d:0703 Issue LUN Reset to TGT %d LUN %d "
-                       "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
+                       "%d (%d):0703 Issue target reset to TGT %d LUN %d "
+                       "rpi x%x nlp_flag x%x\n",
+                       phba->brd_no, vport->vpi, cmnd->device->id,
                        cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
 
-       ret = lpfc_sli_issue_iocb_wait(phba,
+       iocb_status = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
-       if (ret == IOCB_SUCCESS)
-               ret = SUCCESS;
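+       /*
+        * If the IOCB timed out, the deferred completion handler will
+        * release the scsi buffer, so it must not be freed below.
+        */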
 
+       if (iocb_status == IOCB_TIMEDOUT)
+               iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+
+       if (iocb_status == IOCB_SUCCESS)
+               ret = SUCCESS;
+       else
+               ret = iocb_status;
 
        cmd_result = iocbqrsp->iocb.un.ulpWord[4];
        cmd_status = iocbqrsp->iocb.ulpStatus;
@@ -1071,12 +1226,10 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
                                    0, LPFC_CTX_LUN);
        loopcnt = 0;
        while(cnt) {
-               spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-               spin_lock_irq(phba->host->host_lock);
 
                if (++loopcnt
-                   > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
+                   > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;
 
                cnt = lpfc_sli_sum_iocb(phba,
@@ -1087,30 +1240,32 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 
        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
-                       phba->brd_no, cnt);
+                               "%d (%d):0719 device reset I/O flush failure: "
+                               "cnt x%x\n",
+                               phba->brd_no, vport->vpi, cnt);
                ret = FAILED;
        }
 
 out_free_scsi_buf:
-       lpfc_release_scsi_buf(phba, lpfc_cmd);
-
+       if (iocb_status != IOCB_TIMEDOUT)
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "%d:0713 SCSI layer issued LUN reset (%d, %d) "
-                       "Data: x%x x%x x%x\n",
-                       phba->brd_no, cmnd->device->id,cmnd->device->lun,
-                       ret, cmd_status, cmd_result);
+                       "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
+                       "return x%x status x%x result x%x\n",
+                       phba->brd_no, vport->vpi, cmnd->device->id,
+                       cmnd->device->lun, ret, cmd_status, cmd_result);
 
 out:
-       spin_unlock_irq(shost->host_lock);
        return ret;
 }
 
 static int
-lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
+lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 {
-       struct Scsi_Host *shost = cmnd->device->host;
-       struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+       struct Scsi_Host  *shost = cmnd->device->host;
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
@@ -1118,7 +1273,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
        struct lpfc_scsi_buf * lpfc_cmd;
 
        lpfc_block_error_handler(cmnd);
-       spin_lock_irq(shost->host_lock);
 
        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
@@ -1126,7 +1280,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 
        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;
-       lpfc_cmd->scsi_hba = phba;
 
        /*
         * Since the driver manages a single bus device, reset all
@@ -1134,31 +1287,41 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
         * fail, this routine returns failure to the midlayer.
         */
        for (i = 0; i < LPFC_MAX_TARGET; i++) {
-               /* Search the mapped list for this target ID */
+               /* Search for mapped node by target ID */
                match = 0;
-               list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
-                       if ((i == ndlp->nlp_sid) && ndlp->rport) {
+               spin_lock_irq(shost->host_lock);
+               list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+                       if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+                           i == ndlp->nlp_sid &&
+                           ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
+               spin_unlock_irq(shost->host_lock);
                if (!match)
                        continue;
 
-               ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+               ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+                                         cmnd->device->lun,
                                          ndlp->rport->dd_data);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                               "%d:0700 Bus Reset on target %d failed\n",
-                               phba->brd_no, i);
+                                       "%d (%d):0700 Bus Reset on target %d "
+                                       "failed\n",
+                                       phba->brd_no, vport->vpi, i);
                        err_count++;
+                       break;
                }
        }
 
+       if (ret != IOCB_TIMEDOUT)
+               lpfc_release_scsi_buf(phba, lpfc_cmd);
+
        if (err_count == 0)
                ret = SUCCESS;
-
-       lpfc_release_scsi_buf(phba, lpfc_cmd);
+       else
+               ret = FAILED;
 
        /*
         * All outstanding txcmplq I/Os should have been aborted by
@@ -1172,12 +1335,10 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
                                    0, 0, 0, LPFC_CTX_HOST);
        loopcnt = 0;
        while(cnt) {
-               spin_unlock_irq(phba->host->host_lock);
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-               spin_lock_irq(phba->host->host_lock);
 
                if (++loopcnt
-                   > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
+                   > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;
 
                cnt = lpfc_sli_sum_iocb(phba,
@@ -1187,25 +1348,24 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 
        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                  "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
-                  phba->brd_no, cnt, i);
+                               "%d (%d):0715 Bus Reset I/O flush failure: "
+                               "cnt x%x left x%x\n",
+                               phba->brd_no, vport->vpi, cnt, i);
                ret = FAILED;
        }
 
-       lpfc_printf_log(phba,
-                       KERN_ERR,
-                       LOG_FCP,
-                       "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
-                       phba->brd_no, ret);
+       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                       "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
+                       phba->brd_no, vport->vpi, ret);
 out:
-       spin_unlock_irq(shost->host_lock);
        return ret;
 }
 
 static int
 lpfc_slave_alloc(struct scsi_device *sdev)
 {
-       struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
+       struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
@@ -1225,28 +1385,36 @@ lpfc_slave_alloc(struct scsi_device *sdev)
         * extra.  This list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
-       num_to_alloc = phba->cfg_lun_queue_depth + 2;
-       if (total >= phba->cfg_hba_queue_depth) {
+       num_to_alloc = vport->cfg_lun_queue_depth + 2;
+
+       /* Allow some exchanges to be available always to complete discovery */
+       if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                               "%d:0704 At limitation of %d preallocated "
-                               "command buffers\n", phba->brd_no, total);
+                               "%d (%d):0704 At limitation of %d "
+                               "preallocated command buffers\n",
+                               phba->brd_no, vport->vpi, total);
                return 0;
-       } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+
+       /* Allow some exchanges to be available always to complete discovery */
+       } else if (total + num_to_alloc >
+               phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                               "%d:0705 Allocation request of %d command "
-                               "buffers will exceed max of %d.  Reducing "
-                               "allocation request to %d.\n", phba->brd_no,
-                               num_to_alloc, phba->cfg_hba_queue_depth,
+                               "%d (%d):0705 Allocation request of %d "
+                               "command buffers will exceed max of %d.  "
+                               "Reducing allocation request to %d.\n",
+                               phba->brd_no, vport->vpi, num_to_alloc,
+                               phba->cfg_hba_queue_depth,
                                (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }
 
        for (i = 0; i < num_to_alloc; i++) {
-               scsi_buf = lpfc_new_scsi_buf(phba);
+               scsi_buf = lpfc_new_scsi_buf(vport);
                if (!scsi_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                                       "%d:0706 Failed to allocate command "
-                                       "buffer\n", phba->brd_no);
+                                       "%d (%d):0706 Failed to allocate "
+                                       "command buffer\n",
+                                       phba->brd_no, vport->vpi);
                        break;
                }
 
@@ -1261,13 +1429,14 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 static int
 lpfc_slave_configure(struct scsi_device *sdev)
 {
-       struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
-       struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+       struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);
 
        if (sdev->tagged_supported)
-               scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
+               scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
        else
-               scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
+               scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
 
        /*
         * Initialize the fc transport attributes for the target
@@ -1275,7 +1444,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
-       rport->dev_loss_tmo = phba->cfg_devloss_tmo;
+       rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
@@ -1293,21 +1462,43 @@ lpfc_slave_destroy(struct scsi_device *sdev)
        return;
 }
 
+
 struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
-       .eh_device_reset_handler= lpfc_reset_lun_handler,
-       .eh_bus_reset_handler   = lpfc_reset_bus_handler,
+       .eh_device_reset_handler= lpfc_device_reset_handler,
+       .eh_bus_reset_handler   = lpfc_bus_reset_handler,
+       .slave_alloc            = lpfc_slave_alloc,
+       .slave_configure        = lpfc_slave_configure,
+       .slave_destroy          = lpfc_slave_destroy,
+       .scan_finished          = lpfc_scan_finished,
+       .this_id                = -1,
+       .sg_tablesize           = LPFC_SG_SEG_CNT,
+       .cmd_per_lun            = LPFC_CMD_PER_LUN,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .shost_attrs            = lpfc_hba_attrs,
+       .max_sectors            = 0xFFFF,
+};
+
+struct scsi_host_template lpfc_vport_template = {
+       .module                 = THIS_MODULE,
+       .name                   = LPFC_DRIVER_NAME,
+       .info                   = lpfc_info,
+       .queuecommand           = lpfc_queuecommand,
+       .eh_abort_handler       = lpfc_abort_handler,
+       .eh_device_reset_handler= lpfc_device_reset_handler,
+       .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
+       .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
-       .shost_attrs            = lpfc_host_attrs,
+       .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
 };