uint32_t buffer_tag; /* used for tagged queue ring */
};
+struct lpfc_nvmet_ctxbuf {
+ struct list_head list;
+ struct lpfc_nvmet_rcv_ctx *context;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_sglq *sglq;
+};
+
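The new struct lpfc_nvmet_ctxbuf decouples the per-command IO resources (receive context, iocbq and sglq) from the RQ DMA buffer: contexts are allocated once at setup and recycled through a locked free list instead of being carried by every received buffer. A minimal userspace sketch of that free-list pattern follows; it is illustration only, not driver code, and every name in it is a stand-in.

/* Illustrative sketch only: models the per-XRI context pool, not the driver. */
#include <pthread.h>
#include <stdio.h>

struct ctxbuf {
	struct ctxbuf *next;       /* free-list linkage (driver uses list_head) */
	void *context;             /* stands in for lpfc_nvmet_rcv_ctx          */
	/* the driver also keeps an iocbq and an sglq in each entry */
};

struct ctx_pool {
	pthread_mutex_t lock;      /* stands in for nvmet_io_lock               */
	struct ctxbuf *free_list;  /* stands in for lpfc_nvmet_ctx_list         */
	unsigned int count;        /* stands in for nvmet_ctx_cnt               */
};

static struct ctxbuf *ctx_get(struct ctx_pool *p)
{
	struct ctxbuf *c;

	pthread_mutex_lock(&p->lock);
	c = p->free_list;
	if (c) {
		p->free_list = c->next;
		p->count--;
	}
	pthread_mutex_unlock(&p->lock);
	return c;                  /* NULL: no context available, drop the IO */
}

static void ctx_put(struct ctx_pool *p, struct ctxbuf *c)
{
	pthread_mutex_lock(&p->lock);
	c->next = p->free_list;
	p->free_list = c;
	p->count++;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct ctxbuf a = { 0 }, b = { 0 };
	struct ctx_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };

	ctx_put(&pool, &a);                 /* setup pre-populates the pool     */
	ctx_put(&pool, &b);
	struct ctxbuf *c = ctx_get(&pool);  /* command received: pull a context */
	printf("got %p, %u left\n", (void *)c, pool.count);
	ctx_put(&pool, c);                  /* exchange done: back on free list */
	return 0;
}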
struct lpfc_dma_pool {
struct lpfc_dmabuf *elements;
uint32_t max_count;
struct lpfc_dmabuf dbuf;
uint16_t total_size;
uint16_t bytes_recv;
- void *context;
- struct lpfc_iocbq *iocbq;
- struct lpfc_sglq *sglq;
struct lpfc_queue *hrq; /* ptr to associated Header RQ */
struct lpfc_queue *drq; /* ptr to associated Data RQ */
};
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq;
- uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_enable_nvmet;
uint32_t cfg_nvme_enable_fb;
uint32_t cfg_nvmet_fb_size;
1, 1, 16,
"Specify number of RQ pairs for processing NVMET cmds");
-/*
- * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
- *
- */
-LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
- LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
- "Specify number of buffers to post on every MRQ");
-
/*
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
- &dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
- lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
- phba->cfg_nvmet_mrq_post = 0;
phba->cfg_nvmet_fb_size = 0;
}
void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
- struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+ struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
- struct lpfc_queue *dq, int count);
int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
void lpfc_unregister_fcf(struct lpfc_hba *);
void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
{
struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
uint16_t i, lxri, xri_cnt, els_xri_cnt;
- uint16_t nvmet_xri_cnt, tot_cnt;
+ uint16_t nvmet_xri_cnt;
LIST_HEAD(nvmet_sgl_list);
int rc;
* update on pci function's nvmet xri-sgl list
*/
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
- nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
- /* Ensure we at least meet the minimun for the system */
- if (nvmet_xri_cnt < LPFC_NVMET_RQE_DEF_COUNT)
- nvmet_xri_cnt = LPFC_NVMET_RQE_DEF_COUNT;
-
- tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
- if (nvmet_xri_cnt > tot_cnt) {
- phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
- nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "6301 NVMET post-sgl count changed to %d\n",
- phba->cfg_nvmet_mrq_post);
- }
+ /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+ nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
/* els xri-sgl expanded */
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+
/* Fast-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
*
* This routine is invoked to free the driver's IOCB list and memory.
**/
-static void
+void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
* 0 - successful
* other values - error
**/
-static int
+int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
struct lpfc_iocbq *iocbq_entry = NULL;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
}
-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
- struct lpfc_queue *drq, int count)
-{
- int rc, i;
- struct lpfc_rqe hrqe;
- struct lpfc_rqe drqe;
- struct lpfc_rqb *rqbp;
- struct rqb_dmabuf *rqb_buffer;
- LIST_HEAD(rqb_buf_list);
-
- rqbp = hrq->rqbp;
- for (i = 0; i < count; i++) {
- rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
- if (!rqb_buffer)
- break;
- rqb_buffer->hrq = hrq;
- rqb_buffer->drq = drq;
- list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
- }
- while (!list_empty(&rqb_buf_list)) {
- list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
- hbuf.list);
-
- hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
- hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
- drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
- drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
- rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
- if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_buffer);
- } else {
- list_add_tail(&rqb_buffer->hbuf.list,
- &rqbp->rqb_buffer_list);
- rqbp->buffer_count++;
- }
- }
- return 1;
-}
-
int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
struct lpfc_hba *phba;
struct lpfc_vport *vport = NULL;
struct Scsi_Host *shost = NULL;
- int error, cnt, num;
+ int error;
uint32_t cfg_mode, intr_mode;
/* Allocate memory for HBA structure */
goto out_unset_pci_mem_s4;
}
- cnt = phba->cfg_iocb_cnt * 1024;
- if (phba->nvmet_support) {
- /* Ensure we at least meet the minimun for the system */
- num = (phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq);
- if (num < LPFC_NVMET_RQE_DEF_COUNT)
- num = LPFC_NVMET_RQE_DEF_COUNT;
- cnt += num;
- }
-
- /* Initialize and populate the iocb list per host */
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
- error = lpfc_init_iocb_list(phba, cnt);
-
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1413 Failed to initialize iocb list.\n");
- goto out_unset_driver_resource_s4;
- }
-
INIT_LIST_HEAD(&phba->active_rrq_list);
INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1414 Failed to set up driver resource.\n");
- goto out_free_iocb_list;
+ goto out_unset_driver_resource_s4;
}
/* Get the default values for Model Name and Description */
lpfc_destroy_shost(phba);
out_unset_driver_resource:
lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
- lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
- struct lpfc_iocbq *nvmewqe;
- union lpfc_wqe128 *wqe;
dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
}
dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
-
- dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
- GFP_KERNEL);
- if (!dma_buf->context) {
- pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
- dma_buf->dbuf.phys);
- pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
- dma_buf->hbuf.phys);
- kfree(dma_buf);
- return NULL;
- }
-
- dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
- if (!dma_buf->iocbq) {
- kfree(dma_buf->context);
- pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
- dma_buf->dbuf.phys);
- pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
- dma_buf->hbuf.phys);
- kfree(dma_buf);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "2621 Ran out of nvmet iocb/WQEs\n");
- return NULL;
- }
- dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
- nvmewqe = dma_buf->iocbq;
- wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
- /* Initialize WQE */
- memset(wqe, 0, sizeof(union lpfc_wqe));
- /* Word 7 */
- bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
- bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
- bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
- /* Word 10 */
- bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
- bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
- bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
- dma_buf->iocbq->context1 = NULL;
- spin_lock(&phba->sli4_hba.sgl_list_lock);
- dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- if (!dma_buf->sglq) {
- lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
- kfree(dma_buf->context);
- pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
- dma_buf->dbuf.phys);
- pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
- dma_buf->hbuf.phys);
- kfree(dma_buf);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6132 Ran out of nvmet XRIs\n");
- return NULL;
- }
return dma_buf;
}
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
- unsigned long flags;
-
- __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
- dmab->sglq->state = SGL_FREED;
- dmab->sglq->ndlp = NULL;
-
- spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
- list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
- spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
- lpfc_sli_release_iocbq(phba, dmab->iocbq);
- kfree(dmab->context);
pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
pci_pool_free(phba->lpfc_nvmet_drb_pool,
dmab->dbuf.virt, dmab->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
(rqbp->rqb_free_buffer)(phba, rqb_entry);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6409 Cannot post to RQ %d: %x %x\n",
+ rqb_entry->hrq->queue_id,
+ rqb_entry->hrq->host_index,
+ rqb_entry->hrq->hba_index);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
}
/**
- * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
+ * lpfc_nvmet_ctxbuf_post - Clean up and repost an NVMET IO context buffer
 * @phba: HBA buffer is associated with
- * @ctxp: context to clean up
- * @mp: Buffer to free
+ * @ctx_buf: IO context buffer to repost (carries the receive context,
+ *           iocbq and sglq)
* Returns: None
**/
void
-lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
- struct lpfc_dmabuf *mp)
+lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
- if (ctxp) {
- if (ctxp->flag)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6314 rq_post ctx xri x%x flag x%x\n",
- ctxp->oxid, ctxp->flag);
-
- if (ctxp->txrdy) {
- pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
- ctxp->state = LPFC_NVMET_STE_FREE;
+ struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ unsigned long iflag;
+
+ if (ctxp->txrdy) {
+ pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+ ctxp->txrdy_phys);
+ ctxp->txrdy = NULL;
+ ctxp->txrdy_phys = 0;
}
- lpfc_rq_buf_free(phba, mp);
+ ctxp->state = LPFC_NVMET_STE_FREE;
+
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+ list_add_tail(&ctx_buf->list,
+ &phba->sli4_hba.lpfc_nvmet_ctx_list);
+ phba->sli4_hba.nvmet_ctx_cnt++;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (aborting)
return;
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
+void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+ struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
+ unsigned long flags;
+
+ list_for_each_entry_safe(
+ ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
+ spin_lock_irqsave(
+ &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ list_del_init(&ctx_buf->list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ __lpfc_clear_active_sglq(phba,
+ ctx_buf->sglq->sli4_lxritag);
+ ctx_buf->sglq->state = SGL_FREED;
+ ctx_buf->sglq->ndlp = NULL;
+
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+ list_add_tail(&ctx_buf->sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+ flags);
+
+ lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+ kfree(ctx_buf->context);
+ }
+}
+
+int
+lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+{
+ struct lpfc_nvmet_ctxbuf *ctx_buf;
+ struct lpfc_iocbq *nvmewqe;
+ union lpfc_wqe128 *wqe;
+ int i;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6403 Allocate NVMET resources for %d XRIs\n",
+ phba->sli4_hba.nvmet_xri_cnt);
+
+ /* For each NVMET XRI, allocate the resources needed to process
+ * a received command on a per-XRI basis.
+ */
+ for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
+ ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
+ if (!ctx_buf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6404 Ran out of memory for NVMET\n");
+ return -ENOMEM;
+ }
+
+ ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
+ GFP_KERNEL);
+ if (!ctx_buf->context) {
+ kfree(ctx_buf);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6405 Ran out of NVMET "
+ "context memory\n");
+ return -ENOMEM;
+ }
+ ctx_buf->context->ctxbuf = ctx_buf;
+
+ ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
+ if (!ctx_buf->iocbq) {
+ kfree(ctx_buf->context);
+ kfree(ctx_buf);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6406 Ran out of NVMET iocb/WQEs\n");
+ return -ENOMEM;
+ }
+ ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+ nvmewqe = ctx_buf->iocbq;
+ wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+ /* Initialize WQE */
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+ /* Word 7 */
+ bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+ bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+ bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+ ctx_buf->iocbq->context1 = NULL;
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ if (!ctx_buf->sglq) {
+ lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+ kfree(ctx_buf->context);
+ kfree(ctx_buf);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6407 Ran out of NVMET XRIs\n");
+ return -ENOMEM;
+ }
+ spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ list_add_tail(&ctx_buf->list,
+ &phba->sli4_hba.lpfc_nvmet_ctx_list);
+ spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ }
+ phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+ return 0;
+}
+
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvmet_fc_port_info pinfo;
- int error = 0;
+ int error;
if (phba->targetport)
return 0;
+ error = lpfc_nvmet_setup_io_context(phba);
+ if (error)
+ return error;
+
memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
&phba->pcidev->dev,
&phba->targetport);
#else
- error = -ENOMEM;
+ error = -ENOENT;
#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6025 Cannot register NVME targetport "
"x%x\n", error);
phba->targetport = NULL;
+
+ lpfc_nvmet_cleanup_io_context(phba);
+
} else {
tgtp = (struct lpfc_nvmet_tgtport *)
phba->targetport->private;
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
- if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
/* Check if we already received a free context call
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
- ctxp->rqb_buffer->sglq->sli4_lxritag,
+ ctxp->ctxbuf->sglq->sli4_lxritag,
rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
"6318 XB aborted %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
- lpfc_nvmet_rq_post(phba, ctxp,
- &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
if (rrq_empty)
lpfc_worker_wake_up(phba);
return;
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
- if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
init_completion(&tgtp->tport_unreg_done);
nvmet_fc_unregister_targetport(phba->targetport);
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+ lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
#endif
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
+ struct lpfc_nvmet_ctxbuf *ctx_buf;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
+ unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
#endif
+ ctx_buf = NULL;
if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6157 FCP Drop IO\n");
+ "6157 NVMET FCP Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
goto dropit;
}
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+ if (phba->sli4_hba.nvmet_ctx_cnt) {
+ list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+ ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+ phba->sli4_hba.nvmet_ctx_cnt--;
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+
+ if (!ctx_buf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6408 No NVMET ctx Drop IO\n");
+ oxid = 0;
+ size = 0;
+ sid = 0;
+ ctxp = NULL;
+ goto dropit;
+ }
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
- if (ctxp == NULL) {
- atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6158 FCP Drop IO x%x: Alloc\n",
- oxid);
- lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
- /* Cannot send ABTS without context */
- return;
- }
+ ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->state = LPFC_NVMET_STE_RCV;
- ctxp->rqb_buffer = nvmebuf;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
+ ctxp->ctxbuf = ctx_buf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
* The calling sequence should be:
* nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
* lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+ * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+ * the NVME command / FC header is stored, so we are free to repost
+ * the buffer.
*/
rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
payload, size);
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
}
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
oxid, size, sid);
if (oxid) {
+ lpfc_nvmet_defer_release(phba, ctxp);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
}
- if (nvmebuf) {
- nvmebuf->iocbq->hba_wqidx = 0;
- /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- lpfc_nvmet_rq_post(phba, ctxp, &nvmebuf->hbuf);
- }
+ if (ctx_buf)
+ lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+
+ if (nvmebuf)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
#endif
}
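The receive path above now splits the two lifetimes: the RQ buffer is reposted as soon as the command has been handed to (or rejected by) the NVMET transport, while the IO context stays out until the exchange completes or is aborted. Below is a simplified, illustration-only model of that split; all names are hypothetical stand-ins, and the driver's error path may defer the context release until an ABTS completes rather than returning it inline.

/* Illustrative sketch only: not driver code, names are stand-ins. */
#include <stdio.h>

struct rq_buffer  { int id; };    /* stands in for the hbuf/dbuf RQ pair     */
struct io_context { int oxid; };  /* stands in for lpfc_nvmet_ctxbuf/rcv_ctx */

static void repost_rq_buffer(struct rq_buffer *b)    /* ~ lpfc_rq_buf_free */
{
	printf("RQ buffer %d reposted immediately\n", b->id);
}

static void return_io_context(struct io_context *c)  /* ~ lpfc_nvmet_ctxbuf_post */
{
	printf("context 0x%x back on the free list\n", (unsigned int)c->oxid);
}

static int hand_to_transport(struct io_context *c)   /* ~ nvmet_fc_rcv_fcp_req */
{
	(void)c;
	return 0;                 /* 0 means the transport accepted the command */
}

static void receive_command(struct rq_buffer *b, struct io_context *c)
{
	int rc = hand_to_transport(c);

	/* The RQ buffer always goes straight back: the command and FC header
	 * were captured before hand_to_transport() returned.
	 */
	repost_rq_buffer(b);

	/* The context is held on success and returned by the completion path;
	 * the real error path may instead defer this until an abort finishes.
	 */
	if (rc)
		return_io_context(c);
}

int main(void)
{
	struct rq_buffer b = { 1 };
	struct io_context c = { 0x10 };

	receive_command(&b, &c);
	return 0;
}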
uint64_t isr_timestamp)
{
if (phba->nvmet_support == 0) {
- lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
}
lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
nvmewqe = ctxp->wqeq;
if (nvmewqe == NULL) {
/* Allocate buffer for command wqe */
- nvmewqe = ctxp->rqb_buffer->iocbq;
+ nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6110 lpfc_nvmet_prep_fcp_wqe: No "
return NULL;
}
- sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+ sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
switch (rsp->op) {
case NVMET_FCOP_READDATA:
case NVMET_FCOP_READDATA_RSP:
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ /* This is the iocbq for the abort, not the command */
lpfc_sli_release_iocbq(phba, cmdwqe);
/* Since iaab/iaar are NOT set, there is no work left.
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
+
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
-
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
/* Since iaab/iaar are NOT set, there is no work left.
* For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
sid, xri, ctxp->wqeq->sli4_xritag);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (!ctxp->wqeq) {
- ctxp->wqeq = ctxp->rqb_buffer->iocbq;
- ctxp->wqeq->hba_wqidx = 0;
- }
ndlp = lpfc_findnode_did(phba->pport, sid);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
- ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq = ctxp->ctxbuf->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
- ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq = ctxp->ctxbuf->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}
}
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
+
lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
spin_lock_irqsave(&phba->hbalock, flags);
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
struct rqb_dmabuf *rqb_buffer;
+ struct lpfc_nvmet_ctxbuf *ctxbuf;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t ts_isr_cmd;
(phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
+static int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq, int count)
+{
+ int rc, i;
+ struct lpfc_rqe hrqe;
+ struct lpfc_rqe drqe;
+ struct lpfc_rqb *rqbp;
+ struct rqb_dmabuf *rqb_buffer;
+ LIST_HEAD(rqb_buf_list);
+
+ rqbp = hrq->rqbp;
+ for (i = 0; i < count; i++) {
+ /* If the RQ is already full, don't bother */
+ if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+ break;
+ rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+ if (!rqb_buffer)
+ break;
+ rqb_buffer->hrq = hrq;
+ rqb_buffer->drq = drq;
+ list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+ }
+ while (!list_empty(&rqb_buf_list)) {
+ list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+ hbuf.list);
+
+ hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+ hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+ drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+ drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+ rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+ if (rc < 0) {
+ rqbp->rqb_free_buffer(phba, rqb_buffer);
+ } else {
+ list_add_tail(&rqb_buffer->hbuf.list,
+ &rqbp->rqb_buffer_list);
+ rqbp->buffer_count++;
+ }
+ }
+ return 1;
+}
+
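The relocated, now-static lpfc_post_rq_buffer() also gains a guard so the ring is never overfilled: at most entry_count - 1 - buffer_count new buffers are attempted per call. A standalone check of that arithmetic, with made-up values:

/* Illustrative check of the posting cap above; the values are made up. */
#include <stdio.h>

static int max_postable(int entry_count, int buffer_count, int requested)
{
	int room = entry_count - 1 - buffer_count;   /* loop breaks at this point */

	if (room < 0)
		room = 0;
	return requested < room ? requested : room;
}

int main(void)
{
	/* e.g. a 512-entry RQ that already holds 500 posted buffers */
	printf("%d buffers attempted\n", max_postable(512, 500, 64));  /* 11 */
	return 0;
}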
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i;
+ int rc, i, cnt;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
goto out_destroy_queue;
}
phba->sli4_hba.nvmet_xri_cnt = rc;
+
+ cnt = phba->cfg_iocb_cnt * 1024;
+ /* We need 1 iocbq for every SGL, for IO processing */
+ cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* Initialize and populate the iocb list per host */
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2821 initialize iocb list %d total %d\n",
+ phba->cfg_iocb_cnt, cnt);
+ rc = lpfc_init_iocb_list(phba, cnt);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1413 Failed to init iocb list.\n");
+ goto out_destroy_queue;
+ }
+
lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
"and mapping: %d\n", rc);
goto out_destroy_queue;
}
+
+ cnt = phba->cfg_iocb_cnt * 1024;
+ /* Initialize and populate the iocb list per host */
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2820 initialize iocb list %d total %d\n",
+ phba->cfg_iocb_cnt, cnt);
+ rc = lpfc_init_iocb_list(phba, cnt);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6301 Failed to init iocb list.\n");
+ goto out_destroy_queue;
+ }
}
if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
-
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[i],
phba->sli4_hba.nvmet_mrq_data[i],
- phba->cfg_nvmet_mrq_post);
+ LPFC_NVMET_RQE_DEF_COUNT);
}
}
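With NVMET enabled, lpfc_sli4_hba_setup() now sizes the iocb list as cfg_iocb_cnt * 1024 plus one iocbq per NVMET XRI, and the initial MRQ fill uses the fixed LPFC_NVMET_RQE_DEF_COUNT rather than the removed lpfc_nvmet_mrq_post tunable. A worked example of the sizing with hypothetical numbers (the real values come from the module parameter and the adapter's XRI resources):

/* Worked example only; both inputs below are hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int cfg_iocb_cnt = 2;      /* hypothetical module parameter   */
	unsigned int nvmet_xri_cnt = 2048;  /* hypothetical: max_xri minus ELS */
	unsigned int cnt = cfg_iocb_cnt * 1024 + nvmet_xri_cnt;

	printf("iocb list entries: %u\n", cnt);  /* 2 * 1024 + 2048 = 4096 */
	return 0;
}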
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
out_destroy_queue:
+ lpfc_free_iocb_list(phba);
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
spin_lock_irqsave(&pring->ring_lock, iflags);
ctxp = pwqe->context2;
- sglq = ctxp->rqb_buffer->sglq;
+ sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag;
pwqe->sli4_xritag = sglq->sli4_xritag;
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
+ uint16_t nvmet_ctx_cnt;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
struct list_head lpfc_nvmet_sgl_list;
struct list_head lpfc_abts_nvmet_ctx_list;
+ struct list_head lpfc_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
struct lpfc_sglq **lpfc_sglq_active_list;
uint16_t num_online_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
-
- uint16_t nvmet_mrq_post_idx;
};
enum lpfc_sge_type {