spin_unlock_irq(&phba->hbalock);
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
}
- if (phba->hba_flag & HBA_RECEIVE_BUFFER)
- lpfc_sli4_handle_received_buffer(phba);
}
vports = lpfc_create_vport_work_array(phba);
pring = &phba->sli.ring[LPFC_ELS_RING];
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
- if ((status & HA_RXMASK)
- || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
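+ /* On SLI4, pending receive buffers are handled with the ELS ring events */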
+ if ((status & HA_RXMASK) ||
+ (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+ (phba->hba_flag & HBA_RECEIVE_BUFFER)) {
if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
lpfc_unreg_rpi(vport, ndlp);
/* On pre-SLI4 ports, leave Fabric nodes alone on link down */
- if (!remove && ndlp->nlp_type & NLP_FABRIC)
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
rc = lpfc_disc_state_machine(vport, ndlp, NULL,
remove
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
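+ /* Mark FCF discovery complete before kicking off the initial FLOGI */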
+ phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
if (vport->port_state != LPFC_FLOGI) {
spin_lock_irqsave(&phba->hbalock, flags);
- phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_initial_flogi(vport);
}
/* If the FCF is not available, stop FCF discovery and return. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
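+ /* No FCF available: end the in-progress FCF discovery */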
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
- if (!fcf_mbxq)
+ if (!fcf_mbxq) {
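+ /* Mailbox allocation failed: end the in-progress FCF discovery */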
+ spin_lock_irqsave(&phba->hbalock, flags);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return;
+ }
lpfc_reg_fcfi(phba, fcf_mbxq);
fcf_mbxq->vport = phba->pport;
fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
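+ /* Mailbox could not be issued: end FCF discovery and free the mailbox */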
+ spin_lock_irqsave(&phba->hbalock, flags);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+ }
return;
}
uint16_t *vlan_id)
{
struct lpfc_fcf_conn_entry *conn_entry;
+ int i, j, fcf_vlan_id = 0;
+
+ /* Find the lowest VLAN id in the FCF record */
+ for (i = 0; i < 512; i++) {
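+ /* Each bitmap byte covers 8 vlan ids; scan for the first set bit */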
+ if (new_fcf_record->vlan_bitmap[i]) {
+ fcf_vlan_id = i * 8;
+ j = 0;
+ while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+ j++;
+ fcf_vlan_id++;
+ }
+ break;
+ }
+ }
/* If the FCF is not available, return 0 */
if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
if (*addr_mode & LPFC_FCF_FPMA)
*addr_mode = LPFC_FCF_FPMA;
- *vlan_id = 0xFFFF;
+ /* If the FCF record reports a vlan id, use that vlan id */
+ if (fcf_vlan_id)
+ *vlan_id = fcf_vlan_id;
+ else
+ *vlan_id = 0xFFFF;
return 1;
}
(*addr_mode & LPFC_FCF_FPMA))
*addr_mode = LPFC_FCF_FPMA;
+ /* If the matching connect list entry has a vlan id, use it */
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
*vlan_id = conn_entry->conn_rec.vlan_tag;
+ /*
+ * If no vlan id is specified in the connect list entry, use
+ * the vlan id in the FCF record
+ */
+ else if (fcf_vlan_id)
+ *vlan_id = fcf_vlan_id;
else
*vlan_id = 0xFFFF;
if (phba->link_state >= LPFC_LINK_UP)
lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+ else
+ /*
+ * The link is down: do not continue FCF discovery; clear the
+ * FCF_DISC_INPROGRESS flag
+ */
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
if (unreg_fcf) {
spin_lock_irq(&phba->hbalock);
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
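+ /* Count link attention events handled by this HBA */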
+ phba->link_events++;
if (la->attType == AT_LINK_UP && (!la->mm)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
if (lpfc_fcf_inuse(phba))
return;
+ /* At this point, all discovery is aborted */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
/* Unregister VPIs */
vports = lpfc_create_vport_work_array(phba);
/* Free the current connect table */
list_for_each_entry_safe(conn_entry, next_conn_entry,
- &phba->fcf_conn_rec_list, list)
+ &phba->fcf_conn_rec_list, list) {
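+ /* Unlink each entry before freeing it */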
+ list_del_init(&conn_entry->list);
kfree(conn_entry);
+ }
conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
record_count = conn_hdr->length * sizeof(uint32_t)/
uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
int rc;
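+ /* Keep the HBA event tag in sync with the async FCoE event */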
+ phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
struct lpfc_acqe_dcbx *acqe_dcbx)
{
+ phba->fc_eventTag = acqe_dcbx->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0290 The SLI4 DCBX asynchronous event is not "
"handled yet\n");
/* Free the current connect table */
list_for_each_entry_safe(conn_entry, next_conn_entry,
- &phba->fcf_conn_rec_list, list)
+ &phba->fcf_conn_rec_list, list) {
+ list_del_init(&conn_entry->list);
kfree(conn_entry);
+ }
return;
}
}
phba->sli4_hba.els_cq = qdesc;
- /* Create slow-path Unsolicited Receive Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0502 Failed allocate slow-path USOL RX CQ\n");
- goto out_free_els_cq;
- }
- phba->sli4_hba.rxq_cq = qdesc;
/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2577 Failed allocate memory for fast-path "
"CQ record array\n");
- goto out_free_rxq_cq;
+ goto out_free_els_cq;
}
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
- lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
- phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq:
lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL;
lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
phba->sli4_hba.dat_rq = NULL;
- /* Release unsolicited receive complete queue */
- lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
- phba->sli4_hba.rxq_cq = NULL;
-
/* Release ELS complete queue */
lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL;
phba->sli4_hba.els_cq->queue_id,
phba->sli4_hba.sp_eq->queue_id);
- /* Set up slow-path Unsolicited Receive Complete Queue */
- if (!phba->sli4_hba.rxq_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0532 USOL RX CQ not allocated\n");
- goto out_destroy_els_cq;
- }
- rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
- LPFC_RCQ, LPFC_USOL);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0533 Failed setup of slow-path USOL RX CQ: "
- "rc = 0x%x\n", rc);
- goto out_destroy_els_cq;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
- phba->sli4_hba.rxq_cq->queue_id,
- phba->sli4_hba.sp_eq->queue_id);
-
/* Set up fast-path FCP Response Complete Queue */
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
goto out_destroy_fcp_wq;
}
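+ /* The receive queue now uses the ELS CQ; the separate USOL RX CQ is gone */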
rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
- phba->sli4_hba.rxq_cq, LPFC_USOL);
+ phba->sli4_hba.els_cq, LPFC_USOL);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0541 Failed setup of Receive Queue: "
"parent cq-id=%d\n",
phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.dat_rq->queue_id,
- phba->sli4_hba.rxq_cq->queue_id);
+ phba->sli4_hba.els_cq->queue_id);
return 0;
out_destroy_fcp_wq:
out_destroy_fcp_cq:
for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
- lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
- /* Unset unsolicited receive complete queue */
- lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
/* Unset FCP response complete queue */
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
struct lpfc_sli_ring *pring, uint32_t mask)
{
struct lpfc_iocbq *irspiocbq;
+ struct hbq_dmabuf *dmabuf;
+ struct lpfc_cq_event *cq_event;
unsigned long iflag;
while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
/* Get the response iocb from the head of work queue */
spin_lock_irqsave(&phba->hbalock, iflag);
list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
- irspiocbq, struct lpfc_iocbq, list);
+ cq_event, struct lpfc_cq_event, list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- /* Process the response iocb */
- lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
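+ /* Dispatch on the CQE code saved with the queued event */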
+ switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+ case CQE_CODE_COMPL_WQE:
+ irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+ cq_event);
+ lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+ break;
+ case CQE_CODE_RECEIVE:
+ dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ cq_event);
+ lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ break;
+ default:
+ break;
+ }
}
}
/* perform board reset */
phba->fc_eventTag = 0;
+ phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
/* perform board reset */
phba->fc_eventTag = 0;
+ phba->link_events = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
list_del_init(&phba->sli4_hba.dat_rq->list);
list_del_init(&phba->sli4_hba.mbx_cq->list);
list_del_init(&phba->sli4_hba.els_cq->list);
- list_del_init(&phba->sli4_hba.rxq_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
sizeof(struct lpfc_iocbq) - offset);
- memset(&pIocbIn->sli4_info, 0,
- sizeof(struct lpfc_sli4_rspiocb_info));
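+ /* Save the whole WCQE so completion details stay with the iocb */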
+ pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe;
/* Map WCQE parameters into irspiocb parameters */
pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
if (pIocbOut->iocb_flag & LPFC_IO_FCP)
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- /* Load in additional WCQE parameters */
- pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
- pIocbIn->sli4_info.bfield = 0;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- pIocbIn->sli4_info.bfield |= LPFC_XB;
- if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
- pIocbIn->sli4_info.bfield |= LPFC_PV;
- pIocbIn->sli4_info.priority =
- bf_get(lpfc_wcqe_c_priority, wcqe);
- }
}
/**
/* Add the irspiocb to the response IOCB work list */
spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+ list_add_tail(&irspiocbq->cq_event.list,
+ &phba->sli4_hba.sp_rspiocb_work_queue);
/* Indicate ELS ring attention */
phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
spin_unlock_irqrestore(&phba->hbalock, iflags);
return workposted;
}
-/**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
- *
- * This routine process a slow-path work-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
- struct lpfc_cqe *cqe)
-{
- struct lpfc_wcqe_complete wcqe;
- bool workposted = false;
-
- /* Copy the work queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
-
- /* Check and process for different type of WCQE and dispatch */
- switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
- case CQE_CODE_COMPL_WQE:
- /* Process the WQ complete event */
- workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
- (struct lpfc_wcqe_complete *)&wcqe);
- break;
- case CQE_CODE_RELEASE_WQE:
- /* Process the WQ release event */
- lpfc_sli4_sp_handle_rel_wcqe(phba,
- (struct lpfc_wcqe_release *)&wcqe);
- break;
- case CQE_CODE_XRI_ABORTED:
- /* Process the WQ XRI abort event */
- workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
- (struct sli4_wcqe_xri_aborted *)&wcqe);
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0388 Not a valid WCQE code: x%x\n",
- bf_get(lpfc_wcqe_c_code, &wcqe));
- break;
- }
- return workposted;
-}
-
/**
* lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
* @phba: Pointer to HBA context object.
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
- struct lpfc_rcqe rcqe;
bool workposted = false;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
uint32_t status;
unsigned long iflags;
- /* Copy the receive queue CQE and convert endian order if needed */
- lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
lpfc_sli4_rq_release(hrq, drq);
- if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+ if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE)
goto out;
- if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+ if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
goto out;
- status = bf_get(lpfc_rcqe_status, &rcqe);
+ status = bf_get(lpfc_rcqe_status, rcqe);
switch (status) {
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
- memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+ memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
/* save off the frame for the worker thread to process */
- list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+ list_add_tail(&dma_buf->cq_event.list,
+ &phba->sli4_hba.sp_rspiocb_work_queue);
/* Frame received */
phba->hba_flag |= HBA_RECEIVE_BUFFER;
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue or receive-queue completion
+ * queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_wcqe_complete wcqe;
+ bool workposted = false;
+
+ /* Copy the work queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+ /* Check and process for different type of WCQE and dispatch */
+ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+ case CQE_CODE_COMPL_WQE:
+ /* Process the WQ complete event */
+ workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+ (struct lpfc_wcqe_complete *)&wcqe);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ /* Process the WQ release event */
+ lpfc_sli4_sp_handle_rel_wcqe(phba,
+ (struct lpfc_wcqe_release *)&wcqe);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ /* Process the WQ XRI abort event */
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&wcqe);
+ break;
+ case CQE_CODE_RECEIVE:
+ /* Process the RQ event */
+ workposted = lpfc_sli4_sp_handle_rcqe(phba,
+ (struct lpfc_rcqe *)&wcqe);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0388 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_wcqe_c_code, &wcqe));
+ break;
+ }
+ return workposted;
+}
+
/**
* lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
* @phba: Pointer to HBA context object.
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
- if (!(++ecount % LPFC_GET_QE_REL_INT))
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
- }
- break;
- case LPFC_RCQ:
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
struct hbq_dmabuf *seq_dmabuf = NULL;
struct hbq_dmabuf *temp_dmabuf = NULL;
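+ /* Initialize this frame's list head before linking it into a sequence */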
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
/* Use the hdr_buf to find the sequence that this frame belongs to */
list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
}
temp_hdr = seq_dmabuf->hbuf.virt;
if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
- list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
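+ /* New frame is earlier in the sequence: make it the sequence head */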
+ list_del_init(&seq_dmabuf->hbuf.list);
+ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
return dmabuf;
}
/* find the correct place in the sequence to insert this frame */
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
}
iocbq = first_iocbq;
/*
iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+ bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
* Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
* appropriate receive function when the final frame in a sequence is received.
**/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+ struct hbq_dmabuf *dmabuf)
{
- LIST_HEAD(cmplq);
- struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+ struct hbq_dmabuf *seq_dmabuf;
struct fc_frame_header *fc_hdr;
struct lpfc_vport *vport;
uint32_t fcfi;
/* Clear the receive buffer pending flag */
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
- list_splice_init(&phba->rb_pend_list, &cmplq);
spin_unlock_irq(&phba->hbalock);
/* Process each received buffer */
- while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
- fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
- /* check to see if this a valid type of frame */
- if (lpfc_fc_frame_check(phba, fc_hdr)) {
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
- vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
- if (!vport) {
- /* throw out the frame */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- /* Link this frame */
- seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
- if (!seq_dmabuf) {
- /* unable to add frame to vport - throw it out */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
- continue;
- }
- /* If not last frame in sequence continue processing frames. */
- if (!lpfc_seq_complete(seq_dmabuf)) {
- /*
- * When saving off frames post a new one and mark this
- * frame to be freed when it is finished.
- **/
- lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
- dmabuf->tag = -1;
- continue;
- }
- fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
- iocbq = lpfc_prep_seq(vport, seq_dmabuf);
- if (!lpfc_complete_unsol_iocb(phba,
- &phba->sli.ring[LPFC_ELS_RING],
- iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "2540 Ring %d handler: unexpected Rctl "
- "x%x Type x%x received\n",
- LPFC_ELS_RING,
- fc_hdr->fh_r_ctl, fc_hdr->fh_type);
- };
- return 0;
+ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* check to see if this a valid type of frame */
+ if (lpfc_fc_frame_check(phba, fc_hdr)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+ vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+ if (!vport) {
+ /* throw out the frame */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ /* Link this frame */
+ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+ if (!seq_dmabuf) {
+ /* unable to add frame to vport - throw it out */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ /* If not last frame in sequence continue processing frames. */
+ if (!lpfc_seq_complete(seq_dmabuf)) {
+ /*
+ * When saving off frames, post a new one and mark this frame
+ * to be freed when it is finished.
+ */
+ lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+ dmabuf->tag = -1;
+ return;
+ }
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+ if (!lpfc_complete_unsol_iocb(phba,
+ &phba->sli.ring[LPFC_ELS_RING],
+ iocbq, fc_hdr->fh_r_ctl,
+ fc_hdr->fh_type))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2540 Ring %d handler: unexpected Rctl "
+ "x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
}
/**
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto fail_fcfscan;
}
req_len = sizeof(struct fcf_record) +
"0291 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto fail_fcfscan;
}
/* Get the first SGE entry from the non-embedded DMA memory. This
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2527 Failed to get the non-embedded SGE "
"virtual address\n");
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto fail_fcfscan;
}
virt_addr = mboxq->sge_array->addr[0];
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
error = -EIO;
} else {
spin_lock_irq(&phba->hbalock);
spin_unlock_irq(&phba->hbalock);
error = 0;
}
+fail_fcfscan:
+ if (error) {
+ if (mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
+ }
return error;
}