lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
unsigned long iflag = 0;
- /*
- * There are only two special cases to consider. (1) the scsi command
- * requested scatter-gather usage or (2) the scsi command allocated
- * a request buffer, but did not request use_sg. There is a third
- * case, but it does not require resource deallocation.
- */
- if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
- dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
- psb->seg_cnt, psb->pCmd->sc_data_direction);
- } else {
- if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
- dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
- psb->pCmd->request_bufflen,
- psb->pCmd->sc_data_direction);
- }
- }
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
return 0;
}
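+/*
+ * Undo the DMA mapping set up for this command: unmap the scatter-gather
+ * list if one was used, otherwise unmap the single request buffer.
+ */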
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+{
+ /*
+ * There are only two special cases to consider. (1) the scsi command
+ * requested scatter-gather usage or (2) the scsi command allocated
+ * a request buffer, but did not request use_sg. There is a third
+ * case, but it does not require resource deallocation.
+ */
+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
+ psb->seg_cnt, psb->pCmd->sc_data_direction);
+ } else {
+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
+ psb->pCmd->request_bufflen,
+ psb->pCmd->sc_data_direction);
+ }
+ }
+}
+
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
}
}
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
return 0;
out_host_busy_free_buf:
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
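Net effect of the hunks above: the DMA unmapping that used to happen inside lpfc_release_scsi_buf() is now a separate step, and each completion and error path calls lpfc_scsi_unprep_dma_buf() immediately before lpfc_release_scsi_buf(). A minimal sketch of that pairing follows; it is not part of the patch, the wrapper name and the local cmd variable are illustrative only, and the driver's own headers and types (struct lpfc_hba, struct lpfc_scsi_buf, struct scsi_cmnd) are assumed.

	/*
	 * Illustrative only -- shows the unprep-then-release ordering the
	 * hunks above establish on a command-completion path.
	 */
	static void example_scsi_cmd_done(struct lpfc_hba *phba,
					  struct lpfc_scsi_buf *lpfc_cmd)
	{
		struct scsi_cmnd *cmd = lpfc_cmd->pCmd;

		cmd->scsi_done(cmd);                      /* complete the midlayer command      */
		lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); /* undo the DMA mappings first        */
		lpfc_release_scsi_buf(phba, lpfc_cmd);    /* then return the buffer to the pool */
	}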