BFA_TRC_FILE(HAL, CORE);
-/**
+/*
* BFA IOC FC related definitions
*/
-/**
+/*
* IOC local definitions
*/
#define BFA_IOCFC_TOV 5000 /* msecs */
#define DEF_CFG_NUM_SBOOT_TGTS 16
#define DEF_CFG_NUM_SBOOT_LUNS 16
-/**
+/*
* forward declaration for IOC FC functions
*/
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
-/**
+/*
* BFA Interrupt handling functions
*/
static void
waitq = bfa_reqq(bfa, qid);
list_for_each_safe(qe, qen, waitq) {
- /**
+ /*
* Callback only as long as there is room in request queue
*/
if (bfa_reqq_full(bfa, qid))
bfa_intx(bfa);
}
-/**
+/*
* hal_intr_api
*/
bfa_boolean_t
if (!intr)
return BFA_FALSE;
- /**
+ /*
* RME completion queue interrupt
*/
qintr = intr & __HFN_INT_RME_MASK;
if (!intr)
return BFA_TRUE;
- /**
+ /*
* CPE completion queue interrupt
*/
qintr = intr & __HFN_INT_CPE_MASK;
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
- /**
+ /*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
}
}
- /**
+ /*
* update CI
*/
bfa_rspq_ci(bfa, qid) = pi;
writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
mmiowb();
- /**
+ /*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
if (intr) {
if (intr & __HFN_INT_LL_HALT) {
- /**
+ /*
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
* Status Register will be cleared.
}
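/*
 * Illustrative sketch, not part of this patch: the LL_HALT case above is
 * normally resolved with a read-modify-write of the firmware init-halt
 * register so the corresponding Interrupt Status Register bit can clear.
 * The register handle and halt bit below are placeholder names, not the
 * driver's actual definitions.
 */
static inline void example_clear_ll_halt(void __iomem *ll_halt_reg)
{
	u32 halt = readl(ll_halt_reg);	/* current halt state */

	halt |= 0x01;			/* assumed FW init halt bit */
	writel(halt, ll_halt_reg);	/* write back so the status bit clears */
}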
if (intr & __HFN_INT_ERR_PSS) {
- /**
+ /*
* ERR_PSS bit needs to be cleared as well in case
* interrupts are shared so driver's interrupt handler is
* still called even though it is already masked out.
bfa_isrs[mc] = isr_func;
}
-/**
+/*
* BFA IOC FC related functions
*/
-/**
+/*
* hal_ioc_pvt BFA IOC private functions
*/
BFA_CACHELINE_SZ);
}
-/**
+/*
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
*/
static void
bfa_iocfc_reset_queues(bfa);
- /**
+ /*
* initialize IOC configuration info
*/
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
- /**
+ /*
* dma map REQ and RSP circular queues and shadow pointers
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
cpu_to_be16(cfg->drvcfg.num_rspq_elems);
}
- /**
+ /*
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
*/
iocfc->cfgdone = BFA_FALSE;
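/*
 * Sketch only (not from this patch), expanding on the interrupt
 * coalescing note above: the attribute is typically a coalesce flag plus
 * delay/latency values carried to firmware in big-endian form.  The
 * structure and field names here are assumptions for illustration.
 */
struct example_intr_attr {
	u8	coalesce;	/* enable/disable coalescing */
	__be16	delay;		/* delay, wire (big-endian) format */
	__be16	latency;	/* latency, wire (big-endian) format */
};

static void example_set_coalesce(struct example_intr_attr *attr,
				 u16 delay_us, u16 latency_us)
{
	attr->coalesce = 1;
	attr->delay = cpu_to_be16(delay_us);
	attr->latency = cpu_to_be16(latency_us);
}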
- /**
+ /*
* dma map IOC configuration itself
*/
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
iocfc->cfg = *cfg;
- /**
+ /*
* Initialize chip specific handlers.
*/
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
}
}
-/**
+/*
* Start BFA submodules.
*/
static void
hal_mods[i]->start(bfa);
}
-/**
+/*
* Disable BFA submodules.
*/
static void
complete(&bfad->disable_comp);
}
-/**
+/*
* Update BFA configuration from firmware configuration.
*/
static void
iocfc->cfgdone = BFA_TRUE;
- /**
+ /*
* Configuration is complete - initialize/start submodules
*/
bfa_fcport_init(bfa);
}
}
-/**
+/*
* IOC enable request is complete
*/
static void
bfa_iocfc_send_cfg(bfa);
}
-/**
+/*
* IOC disable request is complete
*/
static void
}
}
-/**
+/*
* Notify sub-modules of hardware failure.
*/
static void
bfa);
}
-/**
+/*
* Actions on chip-reset completion.
*/
static void
bfa_isr_enable(bfa);
}
-/**
+/*
* hal_ioc_public
*/
-/**
+/*
* Query IOC memory requirement information.
*/
void
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
ioc->trcmod = bfa->trcmod;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
- /**
+ /*
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
*/
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
bfa_ioc_detach(&bfa->ioc);
}
-/**
+/*
* Query IOC memory requirement information.
*/
void
bfa_ioc_enable(&bfa->ioc);
}
-/**
+/*
* IOC start called from bfa_start(). Called to start IOC operations
* at driver instantiation for this instance.
*/
bfa_iocfc_start_submod(bfa);
}
-/**
+/*
* IOC stop called from bfa_stop(). Called only when driver is unloaded
* for this instance.
*/
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
-/**
+/*
* Enable IOC after it is disabled.
*/
void
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
-/**
+/*
* Return boot target port wwns -- read from boot information in flash.
*/
void
return cfgrsp->pbc_cfg.nvports;
}
-/**
+/*
* hal_api
*/
-/**
+/*
* Use this function to query the memory requirement of the BFA library.
* This function needs to be called before bfa_attach() to get the
* memory required of the BFA layer for a given driver configuration.
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
-/**
+/*
* Use this function to attach the driver instance to the BFA
* library. This function will not trigger any HW initialization
* process (which will be done in bfa_init() call)
bfa_assert((cfg != NULL) && (meminfo != NULL));
- /**
+ /*
* initialize all memory pointers for iterative allocation
*/
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
bfa_com_port_attach(bfa, meminfo);
}
-/**
+/*
* Use this function to delete a BFA IOC. IOC should be stopped (by
* calling bfa_stop()) before this function call.
*
bfa->plog = plog;
}
-/**
+/*
* Initialize IOC.
*
* This function will return immediately, when the IOC initialization is
bfa_iocfc_init(bfa);
}
-/**
+/*
* Use this function to initiate the IOC configuration setup. This function
* will return immediately.
*
bfa_iocfc_start(bfa);
}
-/**
+/*
* Use this function to quiesce the IOC. This function will return immediately,
* when the IOC is actually stopped, the bfad->comp will be set.
*
bfa->fcs = BFA_TRUE;
}
-/**
+/*
* Periodic timer heart beat from driver
*/
void
bfa_timer_beat(&bfa->timer_mod);
}
-/**
+/*
* Return the list of PCI vendor/device id lists supported by this
* BFA instance.
*/
*pciids = __pciids;
}
-/**
+/*
* Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
* into BFA layer). The OS driver can then turn back and overwrite entries that
* have been configured by the user.
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
}
-/**
+/*
* Retrieve firmware trace information on IOC failure.
*/
bfa_status_t
return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
}
-/**
+/*
* Clear the saved firmware trace information of an IOC.
*/
void
bfa_ioc_debug_fwsave_clear(&bfa->ioc);
}
-/**
+/*
* Fetch firmware trace data.
*
* @param[in] bfa BFA instance
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
}
-/**
+/*
* Dump firmware memory.
*
* @param[in] bfa BFA instance
{
return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
}
-/**
+/*
* Reset hw semaphore & usage cnt regs and initialize.
*/
void
bfa_ioc_pll_init(&bfa->ioc);
}
-/**
+/*
* Fetch firmware statistics data.
*
* @param[in] bfa BFA instance
#include "bfa_modules.h"
-/**
+/*
* BFA module list terminated by NULL
*/
struct bfa_module_s *hal_mods[] = {
NULL
};
-/**
+/*
* Message handlers for various modules.
*/
bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
};
-/**
+/*
* Message handlers for mailbox command classes
*/
bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
fchs->s_id = (s_id);
fchs->ox_id = cpu_to_be16(ox_id);
- /**
+ /*
* @todo no need to set ox_id for request
* no need to set rx_id for response
*/
(__l->__stats += __r->__stats)
-/**
+/*
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
-/**
+/*
* bfa_itnim_sm BFA itnim state machine
*/
BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
};
-/**
+/*
* BFA IOIM related definitions
*/
#define bfa_ioim_move_to_comp_q(__ioim) do { \
if ((__fcpim)->profile_start) \
(__fcpim)->profile_start(__ioim); \
} while (0)
-/**
+/*
* hal_ioim_sm
*/
-/**
+/*
* IO state machine events
*/
enum bfa_ioim_event {
};
-/**
+/*
* BFA TSKIM related definitions
*/
-/**
+/*
* task management completion handling
*/
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
};
-/**
+/*
* forward declaration for BFA ITNIM functions
*/
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
-/**
+/*
* forward declaration of ITNIM state machine
*/
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
enum bfa_itnim_event event);
-/**
+/*
* forward declaration for BFA IOIM functions
*/
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-/**
+/*
* forward declaration of BFA IO state machine
*/
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
enum bfa_ioim_event event);
-/**
+/*
* forward declaration for BFA TSKIM functions
*/
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
-/**
+/*
* forward declaration of BFA TSKIM state machine
*/
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
-/**
+/*
* hal_fcpim_mod BFA FCP Initiator Mode module
*/
-/**
+/*
* Compute and return memory needed by FCP(im) module.
*/
static void
{
bfa_itnim_meminfo(cfg, km_len, dm_len);
- /**
+ /*
* IO memory
*/
if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
- /**
+ /*
* task management command memory
*/
if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
-/**
+/*
* BFA ITNIM module state machine functions
*/
-/**
+/*
* Beginning/unallocated state - no events expected.
*/
static void
}
}
-/**
+/*
* Beginning state, only online event expected.
*/
static void
}
}
-/**
+/*
* Waiting for itnim create response from firmware.
*/
static void
}
}
-/**
+/*
* Waiting for itnim create response from firmware, a delete is pending.
*/
static void
}
}
-/**
+/*
* Online state - normal parking state.
*/
static void
}
}
-/**
+/*
* Second level error recovery needed.
*/
static void
}
}
-/**
+/*
* Going offline. Waiting for active IO cleanup.
*/
static void
}
}
-/**
+/*
* Deleting itnim. Waiting for active IO cleanup.
*/
static void
}
}
-/**
+/*
* Rport offline. Firmware itnim is being deleted - awaiting f/w response.
*/
static void
}
}
-/**
+/*
* Offline state.
*/
static void
}
}
-/**
+/*
* IOC h/w failed state.
*/
static void
}
}
-/**
+/*
* Itnim is deleted, waiting for firmware response to delete.
*/
static void
}
}
-/**
+/*
* Initiate cleanup of all IOs on an IOC failure.
*/
static void
bfa_ioim_iocdisable(ioim);
}
- /**
+ /*
* For IO request in pending queue, we pretend an early timeout.
*/
list_for_each_safe(qe, qen, &itnim->pending_q) {
}
}
-/**
+/*
* IO cleanup completion
*/
static void
bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
-/**
+/*
* Initiate cleanup of all IOs.
*/
static void
list_for_each_safe(qe, qen, &itnim->io_q) {
ioim = (struct bfa_ioim_s *) qe;
- /**
+ /*
* Move IO to a cleanup queue from active queue so that a later
* TM will not pick up this IO.
*/
bfa_cb_itnim_sler(itnim->ditn);
}
-/**
+/*
* Call to resume any I/O requests waiting for room in request queue.
*/
static void
-/**
+/*
* bfa_itnim_public
*/
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
{
- /**
+ /*
* ITN memory
*/
*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
itnim->msg_no++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(itnim->bfa, itnim->reqq);
m->msg_no = itnim->msg_no;
bfa_stats(itnim, fw_create);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(itnim->bfa, itnim->reqq);
{
struct bfi_itnim_delete_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(itnim->bfa, itnim->reqq);
m->fw_handle = itnim->rport->fw_handle;
bfa_stats(itnim, fw_delete);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(itnim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Cleanup all pending failed inflight requests.
*/
static void
}
}
-/**
+/*
* Start all pending IO requests.
*/
static void
bfa_itnim_iotov_stop(itnim);
- /**
+ /*
* Abort all inflight IO requests in the queue
*/
bfa_itnim_delayed_comp(itnim, BFA_FALSE);
- /**
+ /*
* Start all pending IO requests.
*/
while (!list_empty(&itnim->pending_q)) {
}
}
-/**
+/*
* Fail all pending IO requests
*/
static void
{
struct bfa_ioim_s *ioim;
- /**
+ /*
* Fail all inflight IO requests in the queue
*/
bfa_itnim_delayed_comp(itnim, BFA_TRUE);
- /**
+ /*
* Fail any pending IO requests.
*/
while (!list_empty(&itnim->pending_q)) {
}
}
-/**
+/*
* IO TOV timer callback. Fail any pending IO requests.
*/
static void
bfa_cb_itnim_tov(itnim->ditn);
}
-/**
+/*
* Start IO TOV timer for failing back pending IO requests in offline state.
*/
static void
}
}
-/**
+/*
* Stop IO TOV timer.
*/
static void
}
}
-/**
+/*
* Stop IO TOV timer.
*/
static void
-/**
+/*
* bfa_itnim_public
*/
-/**
+/*
* Itnim interrupt processing.
*/
void
-/**
+/*
* bfa_itnim_api
*/
bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
-/**
+/*
* Return true if itnim is considered offline for holding off IO request.
* IO is not held if itnim is being deleted.
*/
itnim->ioprofile.io_latency.min[j] = ~0;
}
-/**
+/*
* BFA IO module state machine functions
*/
-/**
+/*
* IO is not started (unallocated).
*/
static void
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
* IO in pending queue can get abort requests. Complete abort
* requests immediately.
*/
}
}
-/**
+/*
* IO is waiting for SG pages.
*/
static void
}
}
-/**
+/*
* IO is active.
*/
static void
}
}
-/**
+/*
* IO is retried with new tag.
*/
static void
break;
case BFA_IOIM_SM_ABORT:
- /** in this state IO abort is done.
+ /* in this state IO abort is done.
* Waiting for IO tag resource free.
*/
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
}
}
-/**
+/*
* IO is being aborted, waiting for completion from firmware.
*/
static void
}
}
-/**
+/*
* IO is being cleaned up (implicit abort), waiting for completion from
* firmware.
*/
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
* IO is already being aborted implicitly
*/
ioim->io_cbfn = __bfa_cb_ioim_abort;
break;
case BFA_IOIM_SM_CLEANUP:
- /**
+ /*
* IO can be in cleanup state already due to TM command.
* 2nd cleanup request comes from ITN offline event.
*/
}
}
-/**
+/*
* IO is waiting for room in request CQ
*/
static void
}
}
-/**
+/*
* Active IO is being aborted, waiting for room in request CQ.
*/
static void
}
}
-/**
+/*
* Active IO is being cleaned up, waiting for room in request CQ.
*/
static void
break;
case BFA_IOIM_SM_ABORT:
- /**
+ /*
* IO is already being cleaned up implicitly
*/
ioim->io_cbfn = __bfa_cb_ioim_abort;
}
}
-/**
+/*
* IO bfa callback is pending.
*/
static void
}
}
-/**
+/*
* IO bfa callback is pending. IO resource cannot be freed.
*/
static void
}
}
-/**
+/*
* IO is completed, waiting resource free from firmware.
*/
static void
-/**
+/*
* hal_ioim_private
*/
m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
if (m->io_status == BFI_IOIM_STS_OK) {
- /**
+ /*
* setup sense information, if present
*/
if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
snsinfo = ioim->iosp->snsinfo;
}
- /**
+ /*
* setup residue value correctly for normal completions
*/
if (m->resid_flags == FCP_RESID_UNDER) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
-/**
+/*
* Send I/O request to firmware.
*/
static bfa_boolean_t
struct scatterlist *sg;
struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
return BFA_FALSE;
}
- /**
+ /*
* build i/o request message next
*/
m->io_tag = cpu_to_be16(ioim->iotag);
m->rport_hdl = ioim->itnim->rport->fw_handle;
m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
- /**
+ /*
* build inline IO SG element here
*/
sge = &m->sges[0];
sge->flags = BFI_SGE_PGDLEN;
bfa_sge_to_be(sge);
- /**
+ /*
* set up I/O command parameters
*/
m->cmnd = cmnd_z0;
fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
- /**
+ /*
* set up I/O message header
*/
switch (m->cmnd.iodir) {
m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
- /**
+ /*
* Handle large CDB (>16 bytes).
*/
m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
}
#endif
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(ioim->bfa, ioim->reqq);
return BFA_TRUE;
}
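/*
 * Illustrative sketch (not part of this patch): the request-queue send
 * pattern used by bfa_ioim_send_ioreq() above recurs throughout the
 * driver -- reserve a CQ slot, build the BFI message in place, then
 * produce it; when no slot is free, return failure so the caller can
 * park a wait element and retry on the queue-resume (QRESUME) event.
 */
static bfa_boolean_t example_reqq_send(struct bfa_s *bfa, int reqq)
{
	void *msg = bfa_reqq_next(bfa, reqq);	/* reserve a request slot */

	if (!msg)
		return BFA_FALSE;	/* queue full: caller waits for QRESUME */

	/* ... fill in the BFI message pointed to by msg ... */

	bfa_reqq_produce(bfa, reqq);	/* hand the message to firmware */
	return BFA_TRUE;
}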
-/**
+/*
* Setup any additional SG pages needed. Inline SG element is setup
* at queuing time.
*/
bfa_assert(ioim->nsges > BFI_SGE_INLINE);
- /**
+ /*
* allocate SG pages needed
*/
nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
sge->sg_len = sg_dma_len(sg);
pgcumsz += sge->sg_len;
- /**
+ /*
* set flags
*/
if (i < (nsges - 1))
sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
- /**
+ /*
* set the link element of each page
*/
if (sgeid == ioim->nsges) {
} while (sgeid < ioim->nsges);
}
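/*
 * Sketch (not from the patch): only the first BFI_SGE_INLINE scatter
 * elements ride inline in the I/O request; the rest are laid out in SG
 * pages, chained by the link element set up above.  The helper below
 * just restates the page-count computation used earlier.
 */
static u32 example_sg_pages_needed(u32 nsges)
{
	if (nsges <= BFI_SGE_INLINE)
		return 0;			/* everything fits inline */
	return BFA_SGPG_NPAGE(nsges);		/* pages for the full SG list */
}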
-/**
+/*
* Send I/O abort request to firmware.
*/
static bfa_boolean_t
struct bfi_ioim_abort_req_s *m;
enum bfi_ioim_h2i msgop;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
if (ioim->iosp->abort_explicit)
m->io_tag = cpu_to_be16(ioim->iotag);
m->abort_tag = ++ioim->abort_tag;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(ioim->bfa, ioim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Call to resume any I/O requests waiting for room in request queue.
*/
static void
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
- /**
+ /*
* Move IO from itnim queue to fcpim global queue since itnim will be
* freed.
*/
return BFA_TRUE;
}
-/**
+/*
* or after the link comes back.
*/
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
- /**
+ /*
* If path tov timer expired, failback with PATHTOV status - these
* IO requests are not normally retried by IO stack.
*
}
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
- /**
+ /*
* Move IO to fcpim global queue since itnim will be
* freed.
*/
-/**
+/*
* hal_ioim_friend
*/
-/**
+/*
* Memory allocation and initialization.
*/
void
u8 *snsinfo;
u32 snsbufsz;
- /**
+ /*
* claim memory first
*/
ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
fcpim->ioim_sp_arr = iosp;
bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
- /**
+ /*
* Claim DMA memory for per IO sense data.
*/
snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
snsinfo = fcpim->snsbase.kva;
bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
- /**
+ /*
* Initialize ioim free queues
*/
INIT_LIST_HEAD(&fcpim->ioim_free_q);
}
}
-/**
+/*
* Driver detach time call.
*/
void
io_lat->max[index] : val;
io_lat->avg[index] += val;
}
-/**
+/*
* Called by itnim to clean up IO while going offline.
*/
void
bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
-/**
+/*
* IOC failure handling.
*/
void
bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
-/**
+/*
* IO offline TOV popped. Fail the pending IO.
*/
void
-/**
+/*
* hal_ioim_api
*/
-/**
+/*
* Allocate IOIM resource for initiator mode I/O request.
*/
struct bfa_ioim_s *
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
struct bfa_ioim_s *ioim;
- /**
+ /*
* allocate IOIM resource
*/
bfa_q_deq(&fcpim->ioim_free_q, &ioim);
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
- /**
+ /*
* Obtain the queue over which this request has to be issued
*/
ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
-/**
+/*
* Driver I/O abort request.
*/
bfa_status_t
}
-/**
+/*
* BFA TSKIM state machine functions
*/
-/**
+/*
* Task management command beginning state.
*/
static void
bfa_sm_set_state(tskim, bfa_tskim_sm_active);
bfa_tskim_gather_ios(tskim);
- /**
+ /*
* If device is offline, do not send TM on wire. Just cleanup
* any pending IO requests and complete TM request.
*/
}
}
-/**
+/*
* brief
* TM command is active, awaiting completion from firmware to
* cleanup IO requests in TM scope.
}
}
-/**
+/*
* An active TM is being cleaned up since ITN is offline. Awaiting cleanup
* completion event from firmware.
*/
switch (event) {
case BFA_TSKIM_SM_DONE:
- /**
+ /*
* Ignore and wait for ABORT completion from firmware.
*/
break;
break;
case BFA_TSKIM_SM_CLEANUP:
- /**
+ /*
* Ignore, TM command completed on wire.
* Notify TM completion on IO cleanup completion.
*/
}
}
-/**
+/*
* Task management command is waiting for room in request CQ
*/
static void
break;
case BFA_TSKIM_SM_CLEANUP:
- /**
+ /*
* No need to send TM on wire since ITN is offline.
*/
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
}
}
-/**
+/*
* Task management command is active, awaiting for room in request CQ
* to send clean up request.
*/
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /**
+ /*
*
* Fall through !!!
*/
}
}
-/**
+/*
* BFA callback is pending
*/
static void
-/**
+/*
* hal_tskim_private
*/
return BFA_FALSE;
}
-/**
+/*
* Gather affected IO requests and task management commands.
*/
static void
INIT_LIST_HEAD(&tskim->io_q);
- /**
+ /*
* Gather any active IO requests first.
*/
list_for_each_safe(qe, qen, &itnim->io_q) {
}
}
- /**
+ /*
* Failback any pending IO requests immediately.
*/
list_for_each_safe(qe, qen, &itnim->pending_q) {
}
}
-/**
+/*
* IO cleanup completion
*/
static void
bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
-/**
+/*
* Gather affected IO requests and task management commands.
*/
static void
bfa_wc_wait(&tskim->wc);
}
-/**
+/*
* Send task management request to firmware.
*/
static bfa_boolean_t
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
m->lun = tskim->lun;
m->tm_flags = tskim->tm_cmnd;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Send abort request to cleanup an active TM to firmware.
*/
static bfa_boolean_t
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_abortreq_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
- /**
+ /*
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
-/**
+/*
* Call to resume task management cmnd waiting for room in request queue.
*/
static void
bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
-/**
+/*
* Cleanup IOs associated with a task management command on IOC failures.
*/
static void
-/**
+/*
* hal_tskim_friend
*/
-/**
+/*
* Notification on completions from related ioim.
*/
void
bfa_wc_down(&tskim->wc);
}
-/**
+/*
* Handle IOC h/w failure notification from itnim.
*/
void
bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
-/**
+/*
* Cleanup TM command and associated IOs as part of ITNIM offline.
*/
void
bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
-/**
+/*
* Memory allocation and initialization.
*/
void
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
- /**
+ /*
* @todo
*/
}
tskim->tsk_status = rsp->tsk_status;
- /**
+ /*
* Firmware sends BFI_TSKIM_STS_ABORTED status for abort
* requests. All other statuses are for normal completions.
*/
-/**
+/*
* hal_tskim_api
*/
list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
-/**
+/*
* Start a task management command.
*
* @param[in] tskim BFA task management command instance
* General Public License for more details.
*/
-/**
+/*
* bfa_fcs.c BFA FCS main
*/
BFA_TRC_FILE(FCS, FCS);
-/**
+/*
* FCS sub-modules
*/
struct bfa_fcs_mod_s {
bfa_fcs_fabric_modexit },
};
-/**
+/*
* fcs_api BFA FCS API
*/
-/**
+/*
* fcs_api BFA FCS API
*/
-/**
+/*
* fcs attach -- called once to initialize data structures at driver attach time
*/
void
}
}
-/**
+/*
* fcs initialization, called once after bfa initialization is complete
*/
void
}
}
-/**
+/*
* Start FCS operations.
*/
void
bfa_fcs_fabric_modstart(fcs);
}
-/**
+/*
* brief
* FCS driver details initialization.
*
bfa_fcs_fabric_psymb_init(&fcs->fabric);
}
-/**
+/*
* brief
* FCS FDMI Driver Parameter Initialization
*
fcs->fdmi_enabled = fdmi_enable;
}
-/**
+/*
* brief
* FCS instance cleanup and exit.
*
bfa_wc_down(&fcs->wc);
}
-/**
+/*
* Fabric module implementation.
*/
u32 rsp_len,
u32 resid_len,
struct fchs_s *rspfchs);
-/**
+/*
* fcs_fabric_sm fabric state machine functions
*/
-/**
+/*
* Fabric state machine events
*/
enum bfa_fcs_fabric_event {
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
-/**
+/*
* Beginning state before fabric creation.
*/
static void
}
}
-/**
+/*
* Beginning state before fabric creation.
*/
static void
}
}
-/**
+/*
* Link is down, awaiting LINK UP event from port. This is also the
* first state at fabric creation.
*/
}
}
-/**
+/*
* FLOGI is in progress, awaiting FLOGI reply.
*/
static void
}
}
-/**
+/*
* Authentication is in progress, awaiting authentication results.
*/
static void
}
}
-/**
+/*
* Authentication failed
*/
static void
}
}
-/**
+/*
* Port is in loopback mode.
*/
static void
}
}
-/**
+/*
* There is no attached fabric - private loop or NPort-to-NPort topology.
*/
static void
}
}
-/**
+/*
* Fabric is online - normal operating state.
*/
static void
}
}
-/**
+/*
* Exchanging virtual fabric parameters.
*/
static void
}
}
-/**
+/*
* EVFP exchange complete and VFT tagging is enabled.
*/
static void
bfa_trc(fabric->fcs, event);
}
-/**
+/*
* Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
*/
static void
fabric->event_arg.swp_vfid);
}
-/**
+/*
* Fabric is being deleted, awaiting vport delete completions.
*/
static void
-/**
+/*
* fcs_fabric_private fabric private functions
*/
port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
}
-/**
+/*
* Port Symbolic Name Creation for base port.
*/
void
port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
-/**
+/*
* bfa lps login completion callback
*/
void
bfa_trc(fabric->fcs, fabric->is_npiv);
bfa_trc(fabric->fcs, fabric->is_auth);
}
-/**
+/*
* Allocate and send FLOGI.
*/
static void
bfa_fcs_fabric_set_opertype(fabric);
fabric->stats.fabric_onlines++;
- /**
+ /*
* notify online event to base and then virtual ports
*/
bfa_fcs_lport_online(&fabric->bport);
bfa_trc(fabric->fcs, fabric->fabric_name);
fabric->stats.fabric_offlines++;
- /**
+ /*
* notify offline event first to vports and then base port.
*/
list_for_each_safe(qe, qen, &fabric->vport_q) {
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}
-/**
+/*
* Delete all vports and wait for vport delete completions.
*/
static void
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}
-/**
+/*
* fcs_fabric_public fabric public functions
*/
-/**
+/*
* Attach time initialization.
*/
void
fabric = &fcs->fabric;
memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
- /**
+ /*
* Initialize base fabric.
*/
fabric->fcs = fcs;
fabric->lps = bfa_lps_alloc(fcs->bfa);
bfa_assert(fabric->lps);
- /**
+ /*
* Initialize fabric delete completion handler. Fabric deletion is
* complete when the last vport delete is complete.
*/
bfa_trc(fcs, 0);
}
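/*
 * Sketch, not part of this patch: the "fabric deletion is complete when
 * the last vport delete is complete" rule above is a wait-counter
 * pattern -- take one reference per outstanding vport, release it as
 * each delete finishes, and let the completion callback fire at zero.
 * The per-vport delete trigger itself is omitted here.
 */
static void example_delete_all_vports(struct bfa_fcs_fabric_s *fabric)
{
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &fabric->vport_q) {
		bfa_wc_up(&fabric->wc);	/* one reference per pending vport */
		/* ... initiate delete of the vport at qe ... */
	}

	bfa_wc_wait(&fabric->wc);	/* callback runs when count reaches zero */
}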
-/**
+/*
* Module cleanup
*/
void
bfa_trc(fcs, 0);
- /**
+ /*
* Cleanup base fabric.
*/
fabric = &fcs->fabric;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}
-/**
+/*
* Fabric module start -- kick starts FCS actions
*/
void
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
}
-/**
+/*
* Suspend fabric activity as part of driver suspend.
*/
void
return fabric->oper_type;
}
-/**
+/*
* Link up notification from BFA physical port module.
*/
void
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}
-/**
+/*
* Link down notification from BFA physical port module.
*/
void
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}
-/**
+/*
* A child vport is being created in the fabric.
*
* Call from vport module at vport creation. A list of base port and vports
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport)
{
- /**
+ /*
* - add vport to fabric's vport_q
*/
bfa_trc(fabric->fcs, fabric->vf_id);
bfa_wc_up(&fabric->wc);
}
-/**
+/*
* A child vport is being deleted from fabric.
*
* Vport is being deleted.
bfa_wc_down(&fabric->wc);
}
-/**
+/*
* Base port is deleted.
*/
void
}
-/**
+/*
* Check if fabric is online.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
}
-/**
+/*
* brief
*
*/
return BFA_STATUS_OK;
}
-/**
+/*
* Look up a vport within a fabric given its pwwn
*/
struct bfa_fcs_vport_s *
return NULL;
}
-/**
+/*
* In a given fabric, return the number of lports.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
return oui;
}
-/**
+/*
* Unsolicited frame receive handling.
*/
void
bfa_trc(fabric->fcs, len);
bfa_trc(fabric->fcs, pid);
- /**
+ /*
* Look for our own FLOGI frames being looped back. This means an
* external loopback cable is in place. Our own FLOGI frames are
* sometimes looped back when switch port gets temporarily bypassed.
return;
}
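/*
 * Sketch (an assumption, not the driver's exact check): a looped-back
 * FLOGI is recognizable because the frame carries our own port WWN as
 * its port_name, so it can be dropped and the loopback reported.
 */
static int example_is_own_flogi(wwn_t my_pwwn, struct fc_logi_s *flogi)
{
	return (flogi->els_cmd.els_code == FC_ELS_FLOGI) &&
		(flogi->port_name == my_pwwn);
}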
- /**
+ /*
* FLOGI/EVFP exchanges should be consumed by base fabric.
*/
if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
}
if (fabric->bport.pid == pid) {
- /**
+ /*
* All authentication frames should be routed to auth
*/
bfa_trc(fabric->fcs, els_cmd->els_code);
return;
}
- /**
+ /*
* look for a matching local port ID
*/
list_for_each(qe, &fabric->vport_q) {
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
}
-/**
+/*
* Unsolicited frames to be processed by fabric.
*/
static void
}
}
-/**
+/*
* Process incoming FLOGI
*/
static void
struct fchs_s fchs;
fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
- /**
+ /*
* Do not expect this failure -- expect remote node to retry
*/
if (!fcxp)
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Flogi Acc completion callback.
*/
static void
}
}
-/**
+/*
* Returns FCS vf structure for a given vf_id.
*
* param[in] vf_id - VF_ID
return NULL;
}
-/**
+/*
* BFA FCS PPORT ( physical port)
*/
static void
bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}
-/**
+/*
* BFA FCS UF ( Unsolicited Frames)
*/
-/**
+/*
* BFA callback for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
struct fc_vft_s *vft;
struct bfa_fcs_fabric_s *fabric;
- /**
+ /*
* check for VFT header
*/
if (fchs->routing == FC_RTG_EXT_HDR &&
else
fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
- /**
+ /*
* drop frame if vfid is unknown
*/
if (!fabric) {
return;
}
- /**
+ /*
* skip vft header
*/
fchs = (struct fchs_s *) (vft + 1);
* General Public License for more details.
*/
-/**
+/*
* fcpim.c - FCP initiator mode i-t nexus state machine
*/
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
-/**
+/*
* fcs_itnim_sm FCS itnim state machine events
*/
{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
};
-/**
+/*
* fcs_itnim_sm FCS itnim state machine
*/
-/**
+/*
* itnim_public FCS ITNIM public interfaces
*/
-/**
+/*
* Called by rport when a new rport is created.
*
* @param[in] rport - remote port.
return itnim;
}
-/**
+/*
* Called by rport to delete the instance of FCPIM.
*
* @param[in] rport - remote port.
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
}
-/**
+/*
* Notification from rport that PLOGI is complete to initiate FC-4 session.
*/
void
}
}
-/**
+/*
* Called by rport to handle a remote device offline.
*/
void
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
}
-/**
+/*
* Called by rport when remote port is known to be an initiator from
* PRLI received.
*/
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
-/**
+/*
* Called by rport to check if the itnim is online.
*/
bfa_status_t
}
}
-/**
+/*
* BFA completion callback for bfa_itnim_online().
*/
void
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
}
-/**
+/*
* BFA completion callback for bfa_itnim_offline().
*/
void
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
}
-/**
+/*
* Mark the beginning of PATH TOV handling. IO completion callbacks
* are still pending.
*/
bfa_trc(itnim->fcs, itnim->rport->pwwn);
}
-/**
+/*
* Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
*/
void
itnim_drv->state = ITNIM_STATE_TIMEOUT;
}
-/**
+/*
* BFA notification to FCS/driver for second level error recovery.
*
* At least one I/O request has timed out and the target is unresponsive to
* General Public License for more details.
*/
-/**
- * bfa_fcs_lport.c BFA FCS port
- */
-
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfa_fc.h"
BFA_TRC_FILE(FCS, PORT);
-/**
- * Forward declarations
- */
-
static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, u8 reason_code,
u8 reason_code_expl);
bfa_fcs_lport_n2n_offline},
};
-/**
+/*
* fcs_port_sm FCS logical port state machine
*/
}
}
-/**
+/*
* fcs_port_pvt
*/
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Process incoming plogi from a remote port.
*/
static void
return;
}
- /**
+ /*
* Direct Attach P2P mode : verify address assigned by the r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
port->pid = rx_fchs->d_id;
}
- /**
+ /*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
if (rport) {
- /**
+ /*
* Direct Attach P2P mode : handle address assigned by r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
return;
}
- /**
+ /*
* Next, lookup rport by PID.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
if (!rport) {
- /**
+ /*
* Inbound PLOGI from a new device.
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
- /**
+ /*
* Rport is known only by PID.
*/
if (rport->pwwn) {
- /**
+ /*
* This is a different device with the same pid. Old device
* disappeared. Send implicit LOGO to old device.
*/
bfa_assert(rport->pwwn != plogi->port_name);
bfa_fcs_rport_logo_imp(rport);
- /**
+ /*
* Inbound PLOGI from a new device (with old PID).
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
- /**
+ /*
* PLOGI crossing each other.
*/
bfa_assert(rport->pwwn == WWN_NULL);
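/*
 * Sketch, not part of the patch: the inbound PLOGI handling above boils
 * down to the lookup order below -- by PWWN first, then by PID -- with
 * the "same PID, different device" case sending an implicit LOGO before
 * a new rport is created.  Error paths, P2P address checks and the
 * hand-off to the existing rport are omitted.
 */
static void example_plogi_decision(struct bfa_fcs_lport_s *port,
				   struct fchs_s *rx_fchs,
				   struct fc_logi_s *plogi)
{
	struct bfa_fcs_rport_s *rport;

	rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
	if (rport) {
		/* known device: deliver the PLOGI to this rport */
		return;
	}

	rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
	if (!rport) {
		/* brand new device */
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;
	}

	if (rport->pwwn) {
		/* same PID, different device: old one disappeared */
		bfa_fcs_rport_logo_imp(rport);
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;
	}

	/* rport known only by PID: PLOGI crossing each other */
}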
-/**
+/*
* fcs_lport_api BFA FCS port API
*/
-/**
+/*
* Module initialization
*/
void
}
-/**
+/*
* Module cleanup
*/
void
bfa_fcs_modexit_comp(fcs);
}
-/**
+/*
* Unsolicited frame receive handling.
*/
void
return;
}
- /**
+ /*
* First, handle ELSs that do not require a login.
*/
/*
bfa_fcs_lport_abts_acc(lport, fchs);
return;
}
- /**
+ /*
* look for a matching remote port ID
*/
rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
return;
}
- /**
+ /*
* Only handles ELS frames for now.
*/
if (fchs->type != FC_TYPE_ELS) {
}
if (els_cmd->els_code == FC_ELS_LOGO) {
- /**
+ /*
* @todo Handle LOGO frames received.
*/
return;
}
if (els_cmd->els_code == FC_ELS_PRLI) {
- /**
+ /*
* @todo Handle PRLI frames received.
*/
return;
}
- /**
+ /*
* Unhandled ELS frames. Send a LS_RJT.
*/
bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
}
-/**
+/*
* PID based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
return NULL;
}
-/**
+/*
* PWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
return NULL;
}
-/**
+/*
* NWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
return NULL;
}
-/**
+/*
* Called by rport module when new rports are discovered.
*/
void
port->num_rports++;
}
-/**
+/*
* Called by rport module when rports are deleted.
*/
void
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
}
-/**
+/*
* Called by fabric for base port when fabric login is complete.
* Called by vport for virtual ports when FDISC is complete.
*/
bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
}
-/**
+/*
* Called by fabric for base port when fabric goes offline.
* Called by vport for virtual ports when virtual port becomes offline.
*/
bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
}
-/**
+/*
* Called by fabric to delete base lport and associated resources.
*
* Called by vport to delete lport and associated resources. Should call
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
}
-/**
+/*
* Return TRUE if port is online, else return FALSE
*/
bfa_boolean_t
return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
}
-/**
+/*
* Attach time initialization of logical ports.
*/
void
lport->num_rports = 0;
}
-/**
+/*
* Logical port initialization of base or virtual port.
* Called by fabric for base port or by vport for virtual ports.
*/
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
-/**
+/*
* fcs_lport_api
*/
}
}
-/**
+/*
* bfa_fcs_lport_fab port fab functions
*/
-/**
+/*
* Called by port to initialize fabric services of the base port.
*/
static void
bfa_fcs_lport_ms_init(port);
}
-/**
+/*
* Called by port to notify transition to online state.
*/
static void
bfa_fcs_lport_scn_online(port);
}
-/**
+/*
* Called by port to notify transition to offline state.
*/
static void
bfa_fcs_lport_ms_offline(port);
}
-/**
+/*
* bfa_fcs_lport_n2n functions
*/
-/**
+/*
* Called by fcs/port to initialize N2N topology.
*/
static void
{
}
-/**
+/*
* Called by fcs/port to notify transition to online state.
*/
static void
((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
sizeof(wwn_t)) > 0) {
port->pid = N2N_LOCAL_PID;
- /**
+ /*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port,
}
}
-/**
+/*
* Called by fcs/port to notify transition to offline state.
*/
static void
struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_port_attr_s *port_attr);
-/**
+/*
* fcs_fdmi_sm FCS FDMI state machine
*/
-/**
+/*
* FDMI State Machine events
*/
enum port_fdmi_event {
static void bfa_fcs_lport_fdmi_sm_disabled(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
-/**
+/*
* Start in offline state - awaiting MS to send start.
*/
static void
bfa_sm_fault(port->fcs, event);
}
}
-/**
+/*
* FDMI is disabled state.
*/
static void
/* No op State. It can only be enabled at Driver Init. */
}
-/**
+/*
* RHBA : Register HBA Attributes.
*/
static void
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
attr->len = (u16) strlen(fcs_hba_attr->serial_num);
memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
attr->len = (u16) strlen(fcs_hba_attr->model);
memcpy(attr->value, fcs_hba_attr->model, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
attr->len = (u16) strlen(fcs_hba_attr->model_desc);
memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->hw_version);
memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
attr->len = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
attr->len = (u16) strlen(fcs_hba_attr->os_name);
memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
len += attr->len;
count++;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
* Update size of payload
*/
- len += ((sizeof(attr->type) +
- sizeof(attr->len)) * count);
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
return len;
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
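/*
 * Sketch (not part of this patch): each FDMI attribute built above is a
 * type/length/value triple whose value is padded to a 4-byte boundary
 * and whose on-wire length finally includes the type/length header as
 * well.  The struct layout below is assumed, for illustration only.
 */
struct example_fdmi_attr {
	__be16	type;
	__be16	len;
	u8	value[];
};

static u16 example_pack_fdmi_attr(struct example_fdmi_attr *attr, u16 type,
				  const void *val, u16 vlen)
{
	u16 padded = roundup(vlen, sizeof(u32));	/* 4-byte align value */

	attr->type = cpu_to_be16(type);
	memcpy(attr->value, val, vlen);
	memset(attr->value + vlen, 0, padded - vlen);
	attr->len = cpu_to_be16(padded + sizeof(attr->type) +
				sizeof(attr->len));

	return padded + sizeof(attr->type) + sizeof(attr->len);	/* bytes used */
}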
-/**
+/*
* RPRT : Register Port
*/
static void
bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
}
-/**
+/*
* This routine builds the Port Attribute Block used in RPA and RPRT commands.
*/
static u16
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
/*
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
attr->len = (u16) strlen(fcs_port_attr.os_device_name);
memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
/*
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
attr->len = (u16) strlen(fcs_port_attr.host_name);
memcpy(attr->value, fcs_port_attr.host_name, attr->len);
- attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
- *fields need
- *to be 4 byte
- *aligned */
+ attr->len = fc_roundup(attr->len, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
len += attr->len;
++count;
- attr->len =
- cpu_to_be16(attr->len + sizeof(attr->type) +
+ attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
sizeof(attr->len));
}
* Update size of payload
*/
port_attrib->attr_count = cpu_to_be32(count);
- len += ((sizeof(attr->type) +
- sizeof(attr->len)) * count);
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
return len;
}
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
-/**
+/*
* RPA : Register Port Attributes.
*/
static void
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RPA);
- attr_len =
- bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
- (u8 *) ((struct ct_hdr_s *) pyld
- + 1));
+ attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+ (u8 *) ((struct ct_hdr_s *) pyld + 1));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len + attr_len, &fchs,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
-/**
+/*
* fcs_ms_sm FCS MS state machine
*/
-/**
+/*
* MS State Machine events
*/
enum port_ms_event {
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
-/**
+/*
* Start in offline state - awaiting NS to send start.
*/
static void
*/
bfa_fcs_lport_fdmi_online(ms);
- /**
+ /*
* if this is a Vport, go to online state.
*/
if (ms->port->vport) {
bfa_sm_fault(ms->port->fcs, event);
}
}
-/**
+/*
* ms_pvt MS local functions
*/
bfa_sm_fault(ms->port->fcs, event);
}
}
-/**
+/*
* ms_pvt MS local functions
*/
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* ms_pvt MS local functions
*/
bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
-/**
+/*
* @page ns_sm_info VPORT NS State Machine
*
* @section ns_sm_interactions VPORT NS State Machine Interactions
u32 *pid_buf, u32 n_pids);
static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
-/**
+/*
* fcs_ns_sm FCS nameserver interface state machine
*/
-/**
+/*
* VPort NS State Machine events
*/
enum vport_ns_event {
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
-/**
+/*
* Start in offline state - awaiting linkup
*/
static void
-/**
+/*
* ns_pvt Nameserver local functions
*/
}
}
-/**
+/*
* Register the symbolic port name.
*/
static void
* for V-Port, form a Port Symbolic Name
*/
if (port->vport) {
- /**
+ /*
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
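/*
 * Sketch only (buffer handling is an assumption, not the driver's code):
 * the note above means the name registered for a vport is the base
 * port's symbolic name with the vport's own symbolic name appended,
 * truncated to BFA_SYMNAME_MAXLEN and NUL-terminated.
 */
static void example_build_vport_symname(char *buf, size_t buflen,
					const char *base_sym,
					const char *vport_sym)
{
	snprintf(buf, buflen, "%s %s", base_sym, vport_sym);
	buf[buflen - 1] = '\0';		/* defensive termination */
}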
-/**
+/*
* Register FC4-Types
*/
static void
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* Register FC4-Features : Should be done after RFT_ID
*/
static void
} else
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
-/**
+/*
* Query Fabric for FC4-Types Devices.
*
* TBD : Need to use a local (FCS private) response buffer, since the response
}
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] port - pointer to bfa_fcs_lport_t.
}
}
-/**
+/*
* fcs_ns_public FCS nameserver public interfaces
*/
}
}
-/**
+/*
* FCS SCN
*/
struct fchs_s *rx_fchs);
static void bfa_fcs_lport_scn_timeout(void *arg);
-/**
+/*
* fcs_scm_sm FCS SCN state machine
*/
-/**
+/*
* VPort SCN State Machine events
*/
enum port_scn_event {
static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
-/**
+/*
* Starting state - awaiting link up.
*/
static void
-/**
+/*
* fcs_scn_private FCS SCN private functions
*/
-/**
+/*
* This routine will be called to send a SCR command.
*/
static void
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] vport - pointer to bfa_fcs_lport_t.
-/**
+/*
* fcs_scn_public FCS state change notification public interfaces
*/
bfa_trc(port->fcs, rpid);
- /**
+ /*
* If this is an unknown device, then it just came online.
* Otherwise let rport handle the RSCN event.
*/
bfa_fcs_rport_scn(rport);
}
-/**
+/*
* rscn format based PID comparison
*/
#define __fc_pid_match(__c0, __c1, __fmt) \
}
}
- /**
- * If any of area, domain or fabric RSCN is received, do a fresh discovery
- * to find new devices.
+ /*
+ * If any of area, domain or fabric RSCN is received, do a fresh
+ * discovery to find new devices.
*/
if (nsquery)
bfa_fcs_lport_ns_query(port);
}
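/*
 * Sketch, not the driver's macro: an RSCN page carries a 24-bit address
 * plus a format qualifier, and matching a local PID against it compares
 * only the bytes significant for that format (port = all three bytes,
 * area = top two, domain = top byte, fabric = match everything).  The
 * enum names are placeholders for illustration.
 */
enum example_rscn_fmt {
	EX_RSCN_FMT_PORT = 0,
	EX_RSCN_FMT_AREA,
	EX_RSCN_FMT_DOMAIN,
	EX_RSCN_FMT_FABRIC,
};

static int example_rscn_pid_match(u32 pid, u32 rscn_pid,
				  enum example_rscn_fmt fmt)
{
	switch (fmt) {
	case EX_RSCN_FMT_PORT:
		return pid == rscn_pid;
	case EX_RSCN_FMT_AREA:
		return (pid & 0xffff00) == (rscn_pid & 0xffff00);
	case EX_RSCN_FMT_DOMAIN:
		return (pid & 0xff0000) == (rscn_pid & 0xff0000);
	default:
		return 1;	/* fabric-wide RSCN matches everything */
	}
}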
-/**
+/*
* BFA FCS port
*/
-/**
+/*
* fcs_port_api BFA FCS port API
*/
struct bfa_fcs_lport_s *
memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
}
-/**
+/*
* FCS virtual port state machine
*/
static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
-/**
+/*
* fcs_vport_sm FCS virtual port state machine
*/
-/**
+/*
* VPort State Machine events
*/
enum bfa_fcs_vport_event {
{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};
-/**
+/*
* Beginning state.
*/
static void
}
}
-/**
+/*
* Created state - a start event is required to start up the state machine.
*/
static void
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
bfa_fcs_vport_do_fdisc(vport);
} else {
- /**
+ /*
* Fabric is offline or not NPIV capable, stay in
* offline state.
*/
case BFA_FCS_VPORT_SM_ONLINE:
case BFA_FCS_VPORT_SM_OFFLINE:
- /**
+ /*
* Ignore ONLINE/OFFLINE events from fabric
* till vport is started.
*/
}
}
-/**
+/*
* Offline state - awaiting ONLINE event from fabric SM.
*/
static void
}
-/**
+/*
* FDISC is sent and awaiting reply from fabric.
*/
static void
}
}
-/**
+/*
* FDISC attempt failed - a timer is active to retry FDISC.
*/
static void
}
}
-/**
+/*
* Vport is online (FDISC is complete).
*/
static void
}
}
-/**
+/*
* Vport is being deleted - awaiting lport delete completion to send
* LOGO to fabric.
*/
}
}
-/**
+/*
* Error State.
* This state will be set when the Vport Creation fails due
* to errors like Dup WWN. In this state only operation allowed
}
}
-/**
+/*
* Lport cleanup is in progress since vport is being deleted. Fabric is
* offline, so no LOGO is needed to complete vport deletion.
*/
}
}
-/**
+/*
* LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
* is done.
*/
-/**
+/*
* fcs_vport_private FCS virtual port private functions
*/
-/**
+/*
* This routine will be called to send a FDISC command.
*/
static void
}
}
-/**
+/*
* Called to send a logout to the fabric. Used when a V-Port is
* deleted/stopped.
*/
}
-/**
+/*
* This routine will be called by bfa_timer on timer timeouts.
*
* param[in] vport - pointer to bfa_fcs_vport_t.
-/**
+/*
* fcs_vport_public FCS virtual port public interfaces
*/
-/**
+/*
* Online notification from fabric SM.
*/
void
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
-/**
+/*
* Offline notification from fabric SM.
*/
void
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}
-/**
+/*
* Cleanup notification from fabric SM on link timer expiry.
*/
void
{
vport->vport_stats.fab_cleanup++;
}
-/**
+/*
* delete notification from fabric SM. To be invoked from within FCS.
*/
void
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
}
-/**
+/*
* Delete completion callback from associated lport
*/
void
-/**
+/*
* fcs_vport_api Virtual port API
*/
-/**
+/*
* Use this function to instantiate a new FCS vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to instantiate a new FCS PBC vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
return rc;
}
-/**
+/*
* Use this function to find out if this is a pbc vport or not.
*
* @param[in] vport - pointer to bfa_fcs_vport_t.
}
-/**
+/*
* Use this function initialize the vport.
*
* @param[in] vport - pointer to bfa_fcs_vport_t.
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to quiesce the vport object. This function will return
* immediately, when the vport is actually stopped, the
* bfa_drv_vport_stop_cb() will be called.
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to delete a vport object. Fabric object should
* be stopped before this function call.
*
return BFA_STATUS_OK;
}
-/**
+/*
* Use this function to get vport's current status info.
*
* param[in] vport pointer to bfa_fcs_vport_t.
attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
}
-/**
+/*
* Use this function to get vport's statistics.
*
* param[in] vport pointer to bfa_fcs_vport_t.
*stats = vport->vport_stats;
}
-/**
+/*
* Use this function to clear vport's statistics.
*
* param[in] vport pointer to bfa_fcs_vport_t.
memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
}
-/**
+/*
* Lookup a virtual port. Excludes base port from lookup.
*/
struct bfa_fcs_vport_s *
return vport;
}
-/**
+/*
* FDISC Response
*/
void
}
}
-/**
+/*
* LOGO response
*/
void
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
-/**
+/*
* Received clear virtual link
*/
void
* General Public License for more details.
*/
-/**
+/*
* rport.c Remote port implementation.
*/
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/**
+/*
* fcs_rport_sm FCS rport state machine events
*/
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
};
-/**
+/*
* Beginning state.
*/
static void
}
}
-/**
+/*
* PLOGI is being sent.
*/
static void
}
}
-/**
+/*
* PLOGI is being sent.
*/
static void
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_SCN:
- /**
+ /*
* Ignore, SCN is possibly online notification.
*/
break;
break;
case RPSM_EVENT_HCB_OFFLINE:
- /**
+ /*
* Ignore BFA callback, on a PLOGI receive we call bfa offline.
*/
break;
}
}
-/**
+/*
* PLOGI is sent.
*/
static void
}
}
-/**
+/*
* PLOGI is sent.
*/
static void
}
}
-/**
+/*
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
* are offline.
*/
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* @todo
* Ignore SCN - PLOGI just completed, FC-4 login should detect
* device failures.
}
}
-/**
+/*
* Rport is ONLINE. FC-4s active.
*/
static void
}
}
-/**
+/*
* An SCN event is received in ONLINE state. NS query is being sent
* prior to ADISC authentication with rport. FC-4s are paused.
*/
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* ignore SCN, wait for response to query itself
*/
break;
}
}
-/**
+/*
* An SCN event is received in ONLINE state. NS query is sent to rport.
* FC-4s are paused.
*/
}
}
-/**
+/*
* An SCN event is received in ONLINE state. ADISC is being sent for
* authenticating with rport. FC-4s are paused.
*/
}
}
-/**
+/*
* An SCN event is received in ONLINE state. ADISC is sent to rport.
* FC-4s are paused.
*/
break;
case RPSM_EVENT_PLOGI_RCVD:
- /**
+ /*
* Too complex to cleanup FC-4 & rport and then acc to PLOGI.
* At least go offline when a PLOGI is received.
*/
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* already processing RSCN
*/
break;
}
}
-/**
+/*
* Rport has sent LOGO. Awaiting FC-4 offline completion callback.
*/
static void
}
}
-/**
+/*
* LOGO needs to be sent to rport. Awaiting FC-4 offline completion
* callback.
*/
}
}
-/**
+/*
* Rport is going offline. Awaiting FC-4 offline completion callback.
*/
static void
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
- /**
+ /*
* rport is already going offline.
* SCN - ignore and wait till transitioning to offline state
*/
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback.
*/
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
- /**
+ /*
* Ignore, already offline.
*/
break;
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback to send LOGO accept.
*/
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
- /**
+ /*
* Ignore - already processing a LOGO.
*/
break;
}
}
-/**
+/*
* Rport is being deleted. FC-4s are offline.
* Awaiting BFA rport offline
* callback to send LOGO.
}
}
-/**
+/*
* Rport is being deleted. FC-4s are offline. LOGO is being sent.
*/
static void
}
}
-/**
+/*
* Rport is offline. FC-4s are offline. BFA rport is offline.
* Timer active to delete stale rport.
*/
}
}
-/**
+/*
* Rport address has changed. Nameserver discovery request is being sent.
*/
static void
}
}
-/**
+/*
* Nameserver discovery failed. Waiting for timeout to retry.
*/
static void
}
}
-/**
+/*
* Rport address has changed. Nameserver discovery request is sent.
*/
static void
bfa_fcs_rport_send_prlo_acc(rport);
break;
case RPSM_EVENT_SCN:
- /**
+ /*
* ignore, wait for NS query response
*/
break;
case RPSM_EVENT_LOGO_RCVD:
- /**
+ /*
* Not logged-in yet. Accept LOGO.
*/
bfa_fcs_rport_send_logo_acc(rport);
-/**
+/*
* fcs_rport_private FCS RPORT private functions
*/
plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
- /**
+ /*
* Check for failure first.
*/
if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
return;
}
- /**
+ /*
* PLOGI is complete. Make sure this device is not one of the known
* devices with a new FC port address.
*/
}
}
- /**
+ /*
* Normal login path -- no evil twins.
*/
rport->stats.plogi_accs++;
}
}
-/**
+/*
* Called to send a logout to the rport.
*/
static void
bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
-/**
+/*
* Send ACC for a LOGO received.
*/
static void
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
-/**
+/*
* brief
* This routine will be called by bfa_timer on timer timeouts.
*
struct bfa_fcs_rport_s *rport;
struct bfad_rport_s *rport_drv;
- /**
+ /*
* allocate rport
*/
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
rport->pid = rpid;
rport->pwwn = pwwn;
- /**
+ /*
* allocate BFA rport
*/
rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
return NULL;
}
- /**
+ /*
* allocate FC-4s
*/
bfa_assert(bfa_fcs_lport_is_initiator(port));
{
struct bfa_fcs_lport_s *port = rport->port;
- /**
+ /*
* - delete FC-4s
* - delete BFA rport
* - remove from queue of rports
}
}
-/**
+/*
* Update rport parameters from PLOGI or PLOGI accept.
*/
static void
{
bfa_fcs_lport_t *port = rport->port;
- /**
+ /*
* - port name
* - node name
*/
rport->pwwn = plogi->port_name;
rport->nwwn = plogi->node_name;
- /**
+ /*
* - class of service
*/
rport->fc_cos = 0;
if (plogi->class2.class_valid)
rport->fc_cos |= FC_CLASS_2;
- /**
+ /*
* - CISC
* - MAX receive frame size
*/
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
- /**
+ /*
* Direct Attach P2P mode :
* This is to handle a bug (233476) in IBM targets in Direct Attach
* Mode. Basically, in FLOGI Accept the target would have
}
-/**
+/*
* Called to handle LOGO received from an existing remote port.
*/
static void
-/**
+/*
* fcs_rport_public FCS rport public interfaces
*/
-/**
+/*
* Called by bport/vport to create a remote port instance for a discovered
* remote device.
*
return rport;
}
-/**
+/*
* Called to create a rport for which only the wwn is known.
*
* @param[in] port - base port
bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
return rport;
}
-/**
+/*
* Called by bport in private loop topology to indicate that a
* rport has been discovered and plogi has been completed.
*
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
}
-/**
+/*
* Called by bport/vport to handle PLOGI received from a new remote port.
* If an existing rport does a plogi, it will be handled separately.
*/
return 0;
}
-/**
+/*
* Called by bport/vport to handle PLOGI received from an existing
* remote port.
*/
bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
struct fc_logi_s *plogi)
{
- /**
+ /*
* @todo Handle P2P and initiator-initiator.
*/
rport->reply_oxid = rx_fchs->ox_id;
bfa_trc(rport->fcs, rport->reply_oxid);
- /**
+ /*
* In Switched fabric topology,
* PLOGI to each other. If our pwwn is smaller, ignore it,
* if it is not a well known address.
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
-/**
+/*
* Called by bport/vport to delete a remote port instance.
*
* Rport delete is called under the following conditions:
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
-/**
+/*
* Called by bport/vport when a target goes offline.
*
*/
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
-/**
+/*
* Called by bport in n2n when a target (attached port) becomes online.
*
*/
{
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
}
-/**
+/*
* Called by bport/vport to notify SCN for the remote port
*/
void
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
-/**
+/*
* Called by fcpim to notify that the ITN cleanup is done.
*/
void
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
-/**
+/*
* Called by fcptm to notify that the ITN cleanup is done.
*/
void
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
-/**
+/*
* brief
* This routine is the BFA callback for the bfa_rport_online() call.
*
bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
}
-/**
+/*
* brief
* This routine is the BFA callback for the bfa_rport_offline() call.
*
bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
}
-/**
+/*
* brief
* This routine is a static BFA callback when there is a QoS flow_id
* change notification
bfa_trc(rport->fcs, rport->pwwn);
}
-/**
+/*
* brief
* This routine is a static BFA callback when there is a QoS priority
* change notification
bfa_trc(rport->fcs, rport->pwwn);
}
-/**
+/*
* Called to process any unsolicited frames from this remote port
*/
void
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
-/**
+/*
* Called to process any unsolicited frames from this remote port
*/
void
FC_MAX_PDUSZ, 0);
}
-/**
+/*
* Return state of rport.
*/
int
return bfa_sm_to_state(rport_sm_table, rport->sm);
}
-/**
+/*
* brief
* Called by the Driver to set rport delete/ageout timeout
*
-/**
+/*
* Remote port implementation.
*/
-/**
+/*
* fcs_rport_api FCS rport API.
*/
-/**
+/*
* Direct API to add a target by port wwn. This interface is used, for
* example, by bios when target pwwn is known from boot lun configuration.
*/
return BFA_STATUS_OK;
}
-/**
+/*
* Direct API to remove a target and its associated resources. This
* interface is used, for example, by driver to remove target
* ports from the target list for a VM.
}
-/**
+/*
* Remote device status for display/debug.
*/
void
}
}
-/**
+/*
* Per remote device statistics.
*/
void
-/**
+/*
* Remote port features (RPF) implementation.
*/
static void bfa_fcs_rpf_timeout(void *arg);
-/**
+/*
* fcs_rport_ftrs_sm FCS rport state machine events
*/
bfa_sm_fault(rport->fcs, event);
}
}
-/**
+/*
* Called when Rport is created.
*/
void
bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
}
-/**
+/*
* Called when Rport becomes online
*/
void
bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
}
-/**
+/*
* Called when Rport becomes offline
*/
void
*num_vecs = __HFN_NUMINTS;
}
-/**
+/*
* No special setup required for crossbow -- vector assignments are implicit.
*/
void
bfa->msix.handler[i] = bfa_msix_lpu_err;
}
-/**
+/*
* Crossbow -- dummy, interrupts are masked
*/
void
{
}
-/**
+/*
* No special enable/disable -- vector assignments are implicit.
*/
void
writel(0, kva + __ct_msix_err_vec_reg[fn]);
}
-/**
+/*
* Dummy interrupt handler for handling spurious interrupt during chip-reinit.
*/
static void
*num_vecs = BFA_MSIX_CT_MAX;
}
-/**
+/*
* Setup MSI-X vector for catapult
*/
void
bfa->msix.handler[i] = bfa_hwct_msix_dummy;
}
-/**
+/*
* Enable MSI-X vectors
*/
void
BFA_TRC_FILE(CNA, IOC);
-/**
+/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
-/**
+/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
-/**
+/*
* hal_ioc_sm
*/
-/**
+/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
-/**
+/*
* IOCPF state machine definitions/declarations
*/
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
-/**
+/*
* IOCPF state machine events
*/
enum iocpf_event {
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
};
-/**
+/*
* IOCPF states
*/
enum bfa_iocpf_state {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
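The IOC and IOCPF machines declared above are driven by event enums and tables of per-state handler functions. A minimal sketch of that function-pointer dispatch style is shown below; all names (demo_sm, DEMO_E_*) are invented for illustration and assume nothing about the real bfa_fsm_* macros.

/* Toy function-pointer state machine in the style of the IOC/IOCPF code. */
#include <stdio.h>

enum demo_event { DEMO_E_ENABLE = 1, DEMO_E_ENABLED = 2, DEMO_E_DISABLE = 3 };

struct demo_sm;
typedef void (*demo_state_fn)(struct demo_sm *sm, enum demo_event event);

struct demo_sm {
    demo_state_fn state;    /* current state == current handler */
};

static void demo_sm_disabled(struct demo_sm *sm, enum demo_event event);
static void demo_sm_enabling(struct demo_sm *sm, enum demo_event event);
static void demo_sm_enabled(struct demo_sm *sm, enum demo_event event);

static void demo_sm_disabled(struct demo_sm *sm, enum demo_event event)
{
    if (event == DEMO_E_ENABLE)
        sm->state = demo_sm_enabling;   /* state transition */
}

static void demo_sm_enabling(struct demo_sm *sm, enum demo_event event)
{
    if (event == DEMO_E_ENABLED)
        sm->state = demo_sm_enabled;
    else if (event == DEMO_E_DISABLE)
        sm->state = demo_sm_disabled;
}

static void demo_sm_enabled(struct demo_sm *sm, enum demo_event event)
{
    if (event == DEMO_E_DISABLE)
        sm->state = demo_sm_disabled;
}

int main(void)
{
    struct demo_sm sm = { .state = demo_sm_disabled };

    /* deliver events much like bfa_fsm_send_event() would */
    sm.state(&sm, DEMO_E_ENABLE);
    sm.state(&sm, DEMO_E_ENABLED);
    printf("in enabled state: %d\n", sm.state == demo_sm_enabled);
    return 0;
}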
-/**
+/*
* IOC State Machine
*/
-/**
+/*
* Beginning state. IOC uninit state.
*/
{
}
-/**
+/*
* IOC is in uninit state.
*/
static void
bfa_sm_fault(ioc, event);
}
}
-/**
+/*
* Reset entry actions -- initialize state machine
*/
static void
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
+/*
* IOC is in reset state.
*/
static void
bfa_iocpf_enable(ioc);
}
-/**
+/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
bfa_ioc_send_getattr(ioc);
}
-/**
+/*
* IOC configuration in progress. Timer is active.
*/
static void
BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}
-/**
+/*
* IOC is being disabled
*/
static void
}
}
-/**
+/*
* IOC disable completion entry.
*/
static void
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
+/*
* Hardware initialization failed.
*/
static void
break;
case IOC_E_FAILED:
- /**
+ /*
* Initialization failure during iocpf init retry.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
struct bfa_ioc_hbfail_notify_s *notify;
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
- /**
+ /*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
"Heart Beat of IOC has failed\n");
}
-/**
+/*
* IOC failure.
*/
static void
switch (event) {
case IOC_E_FAILED:
- /**
+ /*
* Initialization failure during iocpf recovery.
* !!! Fall through !!!
*/
-/**
+/*
* IOCPF State Machine
*/
-/**
+/*
* Reset entry actions -- initialize state machine
*/
static void
iocpf->auto_recover = bfa_auto_recover;
}
-/**
+/*
* Beginning state. IOC is in reset state.
*/
static void
}
}
-/**
+/*
* Semaphore should be acquired for version check.
*/
static void
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
+/*
* Awaiting h/w semaphore to continue with version check.
*/
static void
}
}
-/**
+/*
* Notify enable completion callback.
*/
static void
bfa_iocpf_timer_start(iocpf->ioc);
}
-/**
+/*
* Awaiting firmware version match.
*/
static void
}
}
-/**
+/*
* Request for semaphore.
*/
static void
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
+/*
* Awaiting semaphore for h/w initialization.
*/
static void
bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
}
-/**
+/*
* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
+/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
+/*
* IOC is being disabled
*/
static void
}
}
-/**
+/*
* IOC disable completion entry.
*/
static void
bfa_iocpf_timer_start(iocpf->ioc);
}
-/**
+/*
* Hardware initialization failed.
*/
static void
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
- /**
+ /*
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(iocpf->ioc);
writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
- /**
+ /*
* Notify other functions on HB failure.
*/
bfa_ioc_notify_hbfail(iocpf->ioc);
- /**
+ /*
* Flush any queued up mailbox requests.
*/
bfa_ioc_mbox_hbfail(iocpf->ioc);
bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
-/**
+/*
* IOC is in failed state.
*/
static void
-/**
+/*
* hal_ioc_pvt BFA IOC private functions
*/
ioc->cbfn->disable_cbfn(ioc->bfa);
- /**
+ /*
* Notify common modules registered for notification.
*/
list_for_each(qe, &ioc->hb_notify_q) {
{
u32 r32;
- /**
+ /*
* First read to the semaphore register will return 0, subsequent reads
* will return 1. Semaphore is released by writing 1 to the register
*/
bfa_sem_timer_stop(ioc);
}
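The semaphore comment above describes a read-to-lock register: the first read returns 0 and grants the lock, later reads return 1, and writing 1 releases it. The following is a toy userspace model of that behaviour, with the register replaced by a plain variable; real code uses readl()/writel() on a mapped BAR.

/* Toy model of the read-to-lock hardware semaphore. */
#include <stdio.h>
#include <stdint.h>

static uint32_t sem_reg;        /* 0 = free, 1 = taken */

static uint32_t sem_read(void)
{
    uint32_t prev = sem_reg;

    sem_reg = 1;                /* reading the register also sets it */
    return prev;
}

static void sem_write(uint32_t val)
{
    if (val == 1)
        sem_reg = 0;            /* writing 1 releases the semaphore */
}

int main(void)
{
    printf("first read (acquire): %u\n", sem_read());  /* 0 */
    printf("second read (busy):   %u\n", sem_read());  /* 1 */
    sem_write(1);                                      /* release */
    printf("read after release:   %u\n", sem_read());  /* 0 */
    return 0;
}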
-/**
+/*
* Initialize LPU local memory (aka secondary memory / SRAM)
*/
static void
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
- /**
+ /*
* wait for memory initialization to be complete
*/
i = 0;
i++;
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
- /**
+ /*
* If memory initialization is not successful, IOC timeout will catch
* such failures.
*/
{
u32 pss_ctl;
- /**
+ /*
* Take processor out of reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
{
u32 pss_ctl;
- /**
+ /*
* Put processors in reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
+/*
* Get driver and firmware versions.
*/
void
}
}
-/**
+/*
* Returns TRUE if same.
*/
bfa_boolean_t
return BFA_TRUE;
}
-/**
+/*
* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
{
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
- /**
+ /*
* If bios/efi boot (flash based) -- return true
*/
if (bfa_ioc_is_bios_optrom(ioc))
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
+/*
* Conditionally flush any pending message from firmware at start.
*/
static void
boot_type = BFI_BOOT_TYPE_NORMAL;
boot_env = BFI_BOOT_LOADER_OS;
- /**
+ /*
* Flash based firmware boot BIOS env.
*/
if (bfa_ioc_is_bios_optrom(ioc)) {
boot_env = BFI_BOOT_LOADER_BIOS;
}
- /**
+ /*
* Flash based firmware boot UEFI env.
*/
if (bfa_ioc_is_uefi(ioc)) {
boot_env = BFI_BOOT_LOADER_UEFI;
}
- /**
+ /*
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
return;
}
- /**
+ /*
* If hardware initialization is in progress (initialized by other IOC),
* just wait for an initialization completion interrupt.
*/
return;
}
- /**
+ /*
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
*
if (ioc_fwstate == BFI_IOC_DISABLED ||
(!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
- /**
+ /*
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
return;
}
- /**
+ /*
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
}
-/**
+/*
* Initiate a full firmware download.
*/
static void
u32 chunkno = 0;
u32 i;
- /**
+ /*
* Initialize LMEM first before code download
*/
bfa_ioc_lmem_init(ioc);
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
- /**
+ /*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
bfa_ioc_hwinit(ioc, force);
}
-/**
+/*
* Update BFA configuration from firmware configuration.
*/
static void
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
-/**
+/*
* Attach time initialization of mbox logic.
*/
static void
}
}
-/**
+/*
* Mbox poll timer -- restarts any pending mailbox requests.
*/
static void
struct bfa_mbox_cmd_s *cmd;
u32 stat;
- /**
+ /*
* If no command pending, do nothing
*/
if (list_empty(&mod->cmd_q))
return;
- /**
+ /*
* If previous command is not yet fetched by firmware, do nothing
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat)
return;
- /**
+ /*
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
-/**
+/*
* Cleanup any pending requests.
*/
static void
bfa_q_deq(&mod->cmd_q, &cmd);
}
-/**
+/*
* Read data from SMEM to host through PCI memmap
*
* @param[in] ioc memory for IOC
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
return BFA_STATUS_OK;
}
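The SMEM read/clear loops above advance a linear offset and recompute the page offset whenever the PCI page window is crossed. Below is a self-contained sketch of that wrap-around handling, with an invented PAGE_SZ and a plain array standing in for firmware memory.

/* Sketch of reading a flat firmware-memory range through a paged window. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 0x100                   /* illustrative window size, in bytes */

static uint32_t smem[0x1000 / 4];       /* fake firmware memory */

static uint32_t smem_read_word(uint32_t pgnum, uint32_t pgoff)
{
    return smem[(pgnum * PAGE_SZ + pgoff) / 4];
}

static void smem_read(uint32_t *buf, uint32_t addr, uint32_t len_words)
{
    uint32_t pgnum = addr / PAGE_SZ;
    uint32_t loff = addr % PAGE_SZ;
    uint32_t i;

    for (i = 0; i < len_words; i++) {
        buf[i] = smem_read_word(pgnum, loff);
        loff += sizeof(uint32_t);

        /* handle page offset wrap around */
        if (loff == PAGE_SZ) {
            loff = 0;
            pgnum++;            /* real code rewrites the page register here */
        }
    }
}

int main(void)
{
    uint32_t buf[8];
    uint32_t i;

    for (i = 0; i < sizeof(smem) / sizeof(smem[0]); i++)
        smem[i] = i;
    smem_read(buf, 0xf8, 8);    /* straddles the first page boundary */
    for (i = 0; i < 8; i++)
        printf("%u ", buf[i]);
    printf("\n");
    return 0;
}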
-/**
+/*
* Clear SMEM data from host through PCI memmap
*
* @param[in] ioc memory for IOC
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
- /**
+ /*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
return BFA_STATUS_OK;
}
-/**
+/*
* hal iocpf to ioc interface
*/
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
- /**
+ /*
* Provide enable completion callback.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-/**
+/*
* hal_ioc_public
*/
return BFA_STATUS_OK;
}
-/**
+/*
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
return;
- /**
+ /*
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
- /**
+ /*
* Enable interrupts just before starting LPU
*/
ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_ioc_lpu_start(ioc);
}
-/**
+/*
* Enable/disable IOC failure auto recovery.
*/
void
u32 r32;
int i;
- /**
+ /*
* read the MBOX msg
*/
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
msgp[i] = cpu_to_be32(r32);
}
- /**
+ /*
* turn off mailbox interrupt by clearing mailbox status
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
}
-/**
+/*
* IOC attach time initialization and setup.
*
* @param[in] ioc memory for IOC
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
+/*
* Driver detach time IOC cleanup.
*/
void
bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
-/**
+/*
* Setup IOC PCI properties.
*
* @param[in] pcidev PCI device information for this IOC
ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
ioc->cna = ioc->ctdev && !ioc->fcmode;
- /**
+ /*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
if (ioc->ctdev)
bfa_ioc_reg_init(ioc);
}
-/**
+/*
* Initialize IOC dma memory
*
* @param[in] dm_kva kernel virtual address of IOC dma memory
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
- /**
+ /*
* dma memory for firmware attribute
*/
ioc->attr_dma.kva = dm_kva;
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
-/**
+/*
* Return size of dma memory required.
*/
u32
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
+/*
* Returns memory required for saving firmware trace in case of crash.
* Driver must call this interface to allocate memory required for
* automatic saving of firmware trace. Driver should call
return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
-/**
+/*
* Initialize memory for saving firmware trace. Driver must initialize
* trace memory before calling bfa_ioc_enable().
*/
return PSS_SMEM_PGOFF(fmaddr);
}
-/**
+/*
* Register mailbox message handler functions
*
* @param[in] ioc IOC instance
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
-/**
+/*
* Register mailbox message handler function, to be called by common modules
*/
void
mod->mbhdlr[mc].cbarg = cbarg;
}
-/**
+/*
* Queue a mailbox command request to firmware. Waits if mailbox is busy.
* Responsibility of caller to serialize
*
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
u32 stat;
- /**
+ /*
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
return;
}
- /**
+ /*
* If mailbox is busy, queue command for poll timer
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
return;
}
- /**
+ /*
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
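The mailbox queueing logic above defers a command if an earlier command is still pending or if the mailbox status register shows the firmware has not yet fetched the last one, and otherwise writes the command straight to the mailbox; the poll timer later drains the queue. The following is a simplified model of that decision, with invented types and a one-word "command".

/* Toy queue-or-send decision for posting mailbox commands. */
#include <stdio.h>
#include <stdint.h>

#define CMDQ_DEPTH 8

struct mbox_mod {
    uint32_t cmd_q[CMDQ_DEPTH];     /* pending commands */
    int qlen;
    uint32_t mbox_cmd_reg;          /* nonzero == previous command not fetched */
};

static void mbox_hw_send(struct mbox_mod *mod, uint32_t cmd)
{
    mod->mbox_cmd_reg = 1;          /* firmware clears this when it fetches */
    printf("sent 0x%x to mailbox\n", cmd);
}

static void mbox_queue(struct mbox_mod *mod, uint32_t cmd)
{
    if (mod->qlen) {                /* a previous command is pending */
        mod->cmd_q[mod->qlen++] = cmd;
        return;
    }
    if (mod->mbox_cmd_reg) {        /* mailbox busy: poll timer will retry */
        mod->cmd_q[mod->qlen++] = cmd;
        return;
    }
    mbox_hw_send(mod, cmd);         /* mailbox free: send now */
}

/* what the poll timer does: send the queue head once the mailbox is free */
static void mbox_poll(struct mbox_mod *mod)
{
    int i;

    if (!mod->qlen || mod->mbox_cmd_reg)
        return;
    mbox_hw_send(mod, mod->cmd_q[0]);
    for (i = 1; i < mod->qlen; i++)
        mod->cmd_q[i - 1] = mod->cmd_q[i];
    mod->qlen--;
}

int main(void)
{
    struct mbox_mod mod = { .qlen = 0, .mbox_cmd_reg = 0 };

    mbox_queue(&mod, 0x10);         /* sent immediately */
    mbox_queue(&mod, 0x11);         /* queued: mailbox busy */
    mod.mbox_cmd_reg = 0;           /* firmware fetched the first command */
    mbox_poll(&mod);                /* poll timer drains the queue */
    return 0;
}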
-/**
+/*
* Handle mailbox interrupts
*/
void
bfa_ioc_msgget(ioc, &m);
- /**
+ /*
* Treat IOC message class as special.
*/
mc = m.mh.msg_class;
ioc->port_id = bfa_ioc_pcifn(ioc);
}
-/**
+/*
* return true if IOC is disabled
*/
bfa_boolean_t
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
+/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
-/**
+/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
*/
return BFA_TRUE;
}
-/**
+/*
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
ioc_attr = ioc->attr;
- /**
+ /*
* model name
*/
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
+/*
* hal_wwn_public
*/
wwn_t
return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
-/**
+/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Clear saved firmware trace
*/
void
ioc->dbg_fwsave_once = BFA_TRUE;
}
-/**
+/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
bfa_ioc_send_fwsync(ioc);
- /**
+ /*
* After sending a fw sync mbox command wait for it to
* take effect. We will not wait for a response because
* 1. fw_sync mbox cmd doesn't have a response.
fwsync_iter--;
}
-/**
+/*
* Dump firmware smem
*/
bfa_status_t
loff = *offset;
dlen = *buflen;
- /**
+ /*
* First smem read, sync smem before proceeding
* No need to sync before reading every chunk.
*/
return status;
}
-/**
+/*
* Firmware statistics
*/
bfa_status_t
return status;
}
-/**
+/*
* Save firmware trace if configured.
*/
static void
}
}
-/**
+/*
* Firmware failure detected. Start recovery actions.
*/
static void
return;
}
-/**
+/*
* hal_iocpf_pvt BFA IOC PF private functions
*/
bfa_ioc_hw_sem_get(ioc);
}
-/**
+/*
* bfa timer function
*/
void
}
}
-/**
+/*
* Should be called with lock protection
*/
void
list_add_tail(&timer->qe, &mod->timer_q);
}
-/**
+/*
* Should be called with lock protection
*/
void
struct bfa_ioc_hwif_s hwif_cb;
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
ioc->ioc_hwif = &hwif_cb;
}
-/**
+/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
{
}
-/**
+/*
* Notify other functions on HB failure.
*/
static void
readl(ioc->ioc_regs.err_set);
}
-/**
+/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
}
- /**
+ /*
* Host <-> LPU mailbox command/status registers
*/
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
- /**
+ /*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
-/**
+/*
* Initialize IOC to port mapping.
*/
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
- /**
+ /*
* For crossbow, port id is same as pci function.
*/
ioc->port_id = bfa_ioc_pcifn(ioc);
bfa_trc(ioc, ioc->port_id);
}
-/**
+/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
{
}
-/**
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
struct bfa_ioc_hwif_s hwif_ct;
-/**
+/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
ioc->ioc_hwif = &hwif_ct;
}
-/**
+/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
u32 usecnt;
struct bfi_ioc_image_hdr_s fwhdr;
- /**
+ /*
* Firmware match check is relevant only for CNA.
*/
if (!ioc->cna)
return BFA_TRUE;
- /**
+ /*
* If bios boot (flash based) -- do not increment usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
- /**
+ /*
* If usage count is 0, always return TRUE.
*/
if (usecnt == 0) {
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, ioc_fwstate);
- /**
+ /*
* Use count cannot be non-zero while the chip is in uninitialized state.
*/
bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
- /**
+ /*
* Check if another driver with a different firmware is active
*/
bfa_ioc_fwver_get(ioc, &fwhdr);
return BFA_FALSE;
}
- /**
+ /*
* Same firmware version. Increment the reference count.
*/
usecnt++;
{
u32 usecnt;
- /**
+ /*
* Firmware lock is relevant only for CNA.
*/
if (!ioc->cna)
return;
- /**
+ /*
* If bios boot (flash based) -- do not decrement usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
- /**
+ /*
* decrement usage count
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
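The firmware lock/unlock pair above keeps a usage count under the ioc_usage hardware semaphore: the first function to lock sets the count and owns firmware initialization, and later functions join only when their firmware version matches the running one. Below is a hypothetical, single-threaded model of that handshake; all names are invented.

/* Simplified usage-count handshake guarded by a toy semaphore. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t usage_sem;              /* toy semaphore: 0 free, 1 taken */
static uint32_t usage_cnt;              /* functions using the firmware */
static uint32_t running_fw_ver = 3;

static void usage_sem_get(void) { while (usage_sem) ; usage_sem = 1; }
static void usage_sem_put(void) { usage_sem = 0; }

static bool ioc_firmware_lock(uint32_t my_fw_ver)
{
    bool ok;

    usage_sem_get();
    if (usage_cnt == 0) {
        usage_cnt = 1;          /* we own firmware download/boot */
        ok = true;
    } else if (my_fw_ver == running_fw_ver) {
        usage_cnt++;            /* same firmware: just join */
        ok = true;
    } else {
        ok = false;             /* mismatch: another driver owns the chip */
    }
    usage_sem_put();
    return ok;
}

static void ioc_firmware_unlock(void)
{
    usage_sem_get();
    if (usage_cnt)
        usage_cnt--;
    usage_sem_put();
}

int main(void)
{
    printf("first lock:    %d (usecnt=%u)\n", ioc_firmware_lock(3), usage_cnt);
    printf("matching lock: %d (usecnt=%u)\n", ioc_firmware_lock(3), usage_cnt);
    printf("mismatch lock: %d (usecnt=%u)\n", ioc_firmware_lock(4), usage_cnt);
    ioc_firmware_unlock();
    printf("after unlock:  usecnt=%u\n", usage_cnt);
    return 0;
}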
-/**
+/*
* Notify other functions on HB failure.
*/
static void
}
}
-/**
+/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers - port 0
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};
-/**
+/*
* Host <-> LPU mailbox command/status registers - port 1
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
- /**
+ /*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
-/**
+/*
* Initialize IOC to port mapping.
*/
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
- /**
+ /*
* For catapult, base port id on personality register and IOC type
*/
r32 = readl(rb + FNC_PERS_REG);
bfa_trc(ioc, ioc->port_id);
}
-/**
+/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
__F0_INTX_STATUS;
- /**
+ /*
* If already in desired mode, do not change anything
*/
if (!msix && mode)
writel(r32, rb + FNC_PERS_REG);
}
-/**
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
}
}
-/**
+/*
* bfa_port_enable_isr()
*
*
port->endis_cbfn(port->endis_cbarg, status);
}
-/**
+/*
* bfa_port_disable_isr()
*
*
port->endis_cbfn(port->endis_cbarg, status);
}
-/**
+/*
* bfa_port_get_stats_isr()
*
*
}
}
-/**
+/*
* bfa_port_clear_stats_isr()
*
*
port->stats_status = status;
port->stats_busy = BFA_FALSE;
- /**
+ /*
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
}
}
-/**
+/*
* bfa_port_isr()
*
*
}
}
-/**
+/*
* bfa_port_meminfo()
*
*
return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
}
-/**
+/*
* bfa_port_mem_claim()
*
*
port->stats_dma.pa = dma_pa;
}
-/**
+/*
* bfa_port_enable()
*
* Send the Port enable request to the f/w
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_disable()
*
* Send the Port disable request to the f/w
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_get_stats()
*
* Send the request to the f/w to fetch Port statistics.
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_clear_stats()
*
*
return BFA_STATUS_OK;
}
-/**
+/*
* bfa_port_hbfail()
*
*
}
}
-/**
+/*
* bfa_port_attach()
*
*
bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
- /**
+ /*
* initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
bfa_trc(port, 0);
}
-/**
+/*
* bfa_port_detach()
*
*
BFA_MODULE(rport);
BFA_MODULE(uf);
-/**
+/*
* LPS related definitions
*/
#define BFA_LPS_MIN_LPORTS (1)
#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
-/**
+/*
* lps_pvt BFA LPS private functions
*/
BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
};
-/**
+/*
* FC PORT related definitions
*/
/*
(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
-/**
+/*
* BFA port state machine events
*/
enum bfa_fcport_sm_event {
BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
};
-/**
+/*
* BFA port link notification state machine events
*/
BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
};
-/**
+/*
* RPORT related definitions
*/
#define bfa_rport_offline_cb(__rp) do { \
BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
};
-/**
+/*
* forward declarations FCXP related functions
*/
static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_req_s *send_req);
-/**
+/*
* forward declarations for LPS functions
*/
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
-/**
+/*
* forward declaration for LPS state machine
*/
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
event);
-/**
+/*
* forward declaration for FC Port functions
*/
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);
-/**
+/*
* forward declaration for FC PORT state machine
*/
static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
};
-/**
+/*
* forward declaration for RPORT related functions
*/
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void __bfa_cb_rport_offline(void *cbarg,
bfa_boolean_t complete);
-/**
+/*
* forward declaration for RPORT state machine
*/
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
-/**
+/*
* PLOG related definitions
*/
static int
return (bfa_boolean_t)plog->plog_enabled;
}
-/**
+/*
* fcxp_pvt BFA FCXP private functions
*/
mod->bfa = bfa;
mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
- /**
+ /*
* Initialize FCXP request and response payload sizes.
*/
mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
- /**
+ /*
* @todo f/w should not set residue to non-0 when everything
* is received.
*/
}
}
-/**
+/*
* Handler to resume sending fcxp when space is available in the CPE queue.
*/
static void
bfa_fcxp_queue(fcxp, send_req);
}
-/**
+/*
* Queue fcxp send request to firmware.
*/
static void
bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
-/**
+/*
* hal_fcxp_api BFA FCXP API
*/
-/**
+/*
* Allocate an FCXP instance to send a response or to send a request
* that has a response. Request/response buffers are allocated by caller.
*
return fcxp;
}
-/**
+/*
* Get the internal request buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
return mod->req_pld_sz;
}
-/**
+/*
* Get the internal response buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
return rspbuf;
}
-/**
+/*
* Free the BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
bfa_fcxp_put(fcxp);
}
-/**
+/*
* Send a FCXP request
*
* @param[in] fcxp BFA fcxp pointer
bfa_trc(bfa, fcxp->fcxp_tag);
- /**
+ /*
* setup request/response info
*/
reqi->bfa_rport = rport;
fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
fcxp->send_cbarg = cbarg;
- /**
+ /*
* If no room in CPE queue, wait for space in request queue
*/
send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
bfa_fcxp_queue(fcxp, send_req);
}
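The send path above claims a request-queue element and, when the circular queue is full, parks the request on a wait list that is resumed once the hardware consumes entries. The following is a toy version of that produce-or-wait pattern, with invented names and a single parked waiter.

/* Sketch of "no room in the request queue" handling. */
#include <stdio.h>
#include <stdint.h>

#define REQQ_DEPTH 4                    /* toy circular queue depth */

struct reqq {
    uint32_t elems[REQQ_DEPTH];
    uint32_t pi, ci;                    /* producer/consumer indexes */
    void (*waiter)(void *arg);          /* single parked request (toy) */
    void *waiter_arg;
};

static int reqq_full(struct reqq *q)
{
    return ((q->pi + 1) % REQQ_DEPTH) == q->ci;
}

static void reqq_send(struct reqq *q, uint32_t msg, void (*resume)(void *), void *arg)
{
    if (reqq_full(q)) {                 /* no room: wait for space */
        q->waiter = resume;
        q->waiter_arg = arg;
        return;
    }
    q->elems[q->pi] = msg;
    q->pi = (q->pi + 1) % REQQ_DEPTH;   /* "produce" to firmware */
}

static void reqq_consume(struct reqq *q)
{
    if (q->ci == q->pi)
        return;
    q->ci = (q->ci + 1) % REQQ_DEPTH;
    if (q->waiter) {                    /* resume the parked request */
        void (*resume)(void *) = q->waiter;

        q->waiter = NULL;
        resume(q->waiter_arg);
    }
}

static void resend(void *arg)
{
    struct reqq *q = arg;

    printf("queue drained, resending\n");
    reqq_send(q, 0x99, NULL, NULL);
}

int main(void)
{
    struct reqq q = { .pi = 0, .ci = 0, .waiter = NULL };
    int i;

    for (i = 0; i < 4; i++)
        reqq_send(&q, i, resend, &q);   /* 4th send parks on the wait list */
    reqq_consume(&q);                   /* firmware consumes one entry */
    return 0;
}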
-/**
+/*
* Abort a BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
- /**
+ /*
* If waiting for room in request queue, cancel reqq wait
* and free fcxp.
*/
-/**
+/*
* hal_fcxp_public BFA FCXP public functions
*/
}
-/**
+/*
* BFA LPS state machine functions
*/
-/**
+/*
* Init state -- no login
*/
static void
}
}
-/**
+/*
* login is in progress -- awaiting response from firmware
*/
static void
}
}
-/**
+/*
* login pending - awaiting space in request queue
*/
static void
}
}
-/**
+/*
* login complete
*/
static void
}
}
-/**
+/*
* logout in progress - awaiting firmware response
*/
static void
}
}
-/**
+/*
* logout pending -- awaiting space in request queue
*/
static void
-/**
+/*
* lps_pvt BFA LPS private functions
*/
-/**
+/*
* return memory requirement
*/
static void
*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
}
-/**
+/*
* bfa module attach at initialization time
*/
static void
{
}
-/**
+/*
* IOC in disabled state -- consider all lps offline
*/
static void
}
}
-/**
+/*
* Firmware login response
*/
static void
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
-/**
+/*
* Firmware logout response
*/
static void
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
-/**
+/*
* Firmware received a Clear virtual link request (for FCoE)
*/
static void
bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
-/**
+/*
* Space is available in request queue, resume queueing request to firmware.
*/
static void
bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}
-/**
+/*
* lps is freed -- triggered by vport delete
*/
static void
list_add_tail(&lps->qe, &mod->lps_free_q);
}
-/**
+/*
* send login request to firmware
*/
static void
bfa_reqq_produce(lps->bfa, lps->reqq);
}
-/**
+/*
* send logout request to firmware
*/
static void
bfa_reqq_produce(lps->bfa, lps->reqq);
}
-/**
+/*
* Indirect login completion handler for non-fcs
*/
static void
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
-/**
+/*
* Login completion handler -- direct call for fcs, queue for others
*/
static void
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
-/**
+/*
* Indirect logout completion handler for non-fcs
*/
static void
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Logout completion handler -- direct call for fcs, queue for others
*/
static void
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Clear virtual link completion handler for non-fcs
*/
static void
bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
-/**
+/*
* Received Clear virtual link event --direct call for fcs,
* queue for others
*/
-/**
+/*
* lps_public BFA LPS public functions
*/
return BFA_LPS_MAX_VPORTS_SUPP_CB;
}
-/**
+/*
* Allocate a lport service tag.
*/
struct bfa_lps_s *
return lps;
}
-/**
+/*
* Free lport service tag. This can be called anytime after an alloc.
* No need to wait for any pending login/logout completions.
*/
bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
-/**
+/*
* Initiate a lport login.
*/
void
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
-/**
+/*
* Initiate a lport fdisc login.
*/
void
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
-/**
+/*
* Initiate a lport logout (flogi).
*/
void
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
-/**
+/*
* Initiate a lport FDISC logout.
*/
void
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
-/**
+/*
* Discard a pending login request -- should be called only for
* link down handling.
*/
bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
-/**
+/*
* Return lport services tag
*/
u8
return lps->lp_tag;
}
-/**
+/*
* Return lport services tag given the pid
*/
u8
return 0;
}
-/**
+/*
* return if fabric login indicates support for NPIV
*/
bfa_boolean_t
return lps->npiv_en;
}
-/**
+/*
* Return TRUE if attached to F-Port, else return FALSE
*/
bfa_boolean_t
return lps->fport;
}
-/**
+/*
* Return TRUE if attached to a Brocade Fabric
*/
bfa_boolean_t
{
return lps->brcd_switch;
}
-/**
+/*
* return TRUE if authentication is required
*/
bfa_boolean_t
return lps->ext_status;
}
-/**
+/*
* return port id assigned to the lport
*/
u32
return lps->lp_pid;
}
-/**
+/*
* return port id assigned to the base lport
*/
u32
return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}
-/**
+/*
* Return bb_credit assigned in FLOGI response
*/
u16
return lps->pr_bbcred;
}
-/**
+/*
* Return peer port name
*/
wwn_t
return lps->pr_pwwn;
}
-/**
+/*
* Return peer node name
*/
wwn_t
return lps->pr_nwwn;
}
-/**
+/*
* return reason code if login request is rejected
*/
u8
return lps->lsrjt_rsn;
}
-/**
+/*
* return explanation code if login request is rejected
*/
u8
return lps->lsrjt_expl;
}
-/**
+/*
* Return fpma/spma MAC for lport
*/
mac_t
return lps->lp_mac;
}
-/**
+/*
* LPS firmware message class handler.
*/
void
}
}
-/**
+/*
* FC PORT state machine functions
*/
static void
switch (event) {
case BFA_FCPORT_SM_START:
- /**
+ /*
* Start event after IOC is configured and BFA is started.
*/
if (bfa_fcport_send_enable(fcport)) {
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Port is persistently configured to be in enabled state. Do
* not change state. Port enabling is done when START event is
* received.
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* If a port is persistently configured to be disabled, the
* first event will be a port disable request.
*/
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enable is in progress.
*/
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Just send disable request to firmware when room becomes
* available in request queue.
*/
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already being enabled.
*/
break;
break;
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link down event.
*/
break;
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enabled.
*/
break;
switch (event) {
case BFA_FCPORT_SM_ENABLE:
- /**
+ /*
* Already enabled.
*/
break;
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already being disabled.
*/
break;
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already being disabled.
*/
break;
case BFA_FCPORT_SM_LINKUP:
case BFA_FCPORT_SM_LINKDOWN:
- /**
+ /*
* Possible to get link events when doing back-to-back
* enable/disables.
*/
switch (event) {
case BFA_FCPORT_SM_START:
- /**
+ /*
* Ignore start event for a port that is disabled.
*/
break;
break;
case BFA_FCPORT_SM_DISABLE:
- /**
+ /*
* Already disabled.
*/
break;
break;
default:
- /**
+ /*
* Ignore all other events.
*/
;
}
}
-/**
+/*
* Port is enabled. IOC is down/failed.
*/
static void
break;
default:
- /**
+ /*
* Ignore all events.
*/
;
}
}
-/**
+/*
* Port is disabled. IOC is down/failed.
*/
static void
break;
default:
- /**
+ /*
* Ignore all events.
*/
;
}
}
-/**
+/*
* Link state is down
*/
static void
}
}
-/**
+/*
* Link state is waiting for down notification
*/
static void
}
}
-/**
+/*
* Link state is waiting for down notification and there is a pending up
*/
static void
}
}
-/**
+/*
* Link state is up
*/
static void
}
}
-/**
+/*
* Link state is waiting for up notification
*/
static void
}
}
-/**
+/*
* Link state is waiting for up notification and there is a pending down
*/
static void
}
}
-/**
+/*
* Link state is waiting for up notification and there are pending down and up
*/
static void
-/**
+/*
* hal_port_private
*/
bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
-/**
+/*
* Send SCN notification to upper layers.
* trunk - false if caller is fcport to ignore fcport event in trunked mode
*/
bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
-/**
+/*
* Memory initialization.
*/
static void
bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
- /**
+ /*
* initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
- /**
+ /*
* initialize and set default configuration
*/
port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
{
}
-/**
+/*
* Called when IOC is ready.
*/
static void
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
-/**
+/*
* Called before IOC is stopped.
*/
static void
bfa_trunk_iocdisable(bfa);
}
-/**
+/*
* Called when IOC failure is detected.
*/
static void
fcport->qos_attr = pevent->link_state.qos_attr;
fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
- /**
+ /*
* update trunk state if applicable
*/
if (!fcport->cfg.trunked)
fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
-/**
+/*
* Send port enable message to firmware.
*/
static bfa_boolean_t
{
struct bfi_fcport_enable_req_s *m;
- /**
+ /*
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
fcport->msgtag++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
return BFA_TRUE;
}
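The enable/disable senders above bump fcport->msgtag before checking for queue room so that responses belonging to superseded requests can be recognized and dropped. Here is a small illustration of that message-tag check; the structures are invented for the example.

/* Toy message-tag check for discarding stale enable/disable responses. */
#include <stdio.h>
#include <stdint.h>

struct fcport {
    uint8_t msgtag;             /* tag of the most recent request */
};

struct enable_rsp {
    uint8_t msgtag;             /* echoed back by firmware */
};

static struct enable_rsp send_enable(struct fcport *fcport)
{
    struct enable_rsp rsp;

    fcport->msgtag++;           /* bump before queueing the request */
    rsp.msgtag = fcport->msgtag;
    return rsp;                 /* pretend firmware echoes the tag */
}

static void handle_enable_rsp(struct fcport *fcport, struct enable_rsp *rsp)
{
    if (rsp->msgtag != fcport->msgtag) {
        printf("stale response (tag %u), discarded\n", rsp->msgtag);
        return;
    }
    printf("response for current request (tag %u)\n", rsp->msgtag);
}

int main(void)
{
    struct fcport fcport = { .msgtag = 0 };
    struct enable_rsp old_rsp, new_rsp;

    old_rsp = send_enable(&fcport);     /* request #1 */
    new_rsp = send_enable(&fcport);     /* request #2 before #1 completed */
    handle_enable_rsp(&fcport, &old_rsp);   /* dropped as stale */
    handle_enable_rsp(&fcport, &new_rsp);   /* accepted */
    return 0;
}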
-/**
+/*
* Send port disable message to firmware.
*/
static bfa_boolean_t
{
struct bfi_fcport_req_s *m;
- /**
+ /*
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
fcport->msgtag++;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
bfa_lpuid(fcport->bfa));
m->msgtag = fcport->msgtag;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
struct bfa_fcport_s *fcport = port_cbarg;
struct bfi_fcport_set_svc_params_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
bfa_lpuid(fcport->bfa));
m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
if (complete) {
struct bfa_timeval_s tv;
- /**
+ /*
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
-/**
+/*
* Handle trunk SCN event from firmware.
*/
static void
bfa_trc(fcport->bfa, scn->trunk_state);
bfa_trc(fcport->bfa, scn->trunk_speed);
- /**
+ /*
* Save off new state for trunk attribute query
*/
state_prev = trunk->attr.state;
BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
}
- /**
+ /*
* Notify upper layers if trunk state changed.
*/
if ((state_prev != trunk->attr.state) ||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
int i = 0;
- /**
+ /*
* In trunked mode, notify upper layers that link is down
*/
if (fcport->cfg.trunked) {
-/**
+/*
* hal_port_public
*/
-/**
+/*
* Called to initialize port attributes
*/
void
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- /**
+ /*
* Initialize port attributes from IOC hardware data.
*/
bfa_fcport_set_wwns(fcport);
bfa_assert(fcport->speed_sup);
}
-/**
+/*
* Firmware message handler.
*/
void
-/**
+/*
* hal_port_api
*/
-/**
+/*
* Registered callback for port events.
*/
void
return BFA_STATUS_OK;
}
-/**
+/*
* Configure port speed.
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Get current speed.
*/
enum bfa_port_speed
return fcport->speed;
}
-/**
+/*
* Configure port topology.
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Get current topology.
*/
enum bfa_port_topology
bfa_fcport_send_txcredit(fcport);
}
-/**
+/*
* Get port attributes.
*/
#define BFA_FCPORT_STATS_TOV 1000
-/**
+/*
* Fetch port statistics (FCQoS or FCoE).
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Reset port statistics (FCQoS or FCoE).
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Fetch FCQoS port statistics
*/
bfa_status_t
return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
-/**
+/*
* Reset FCQoS port statistics
*/
bfa_status_t
return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
-/**
+/*
* Fetch FCoE port statistics
*/
bfa_status_t
return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
-/**
+/*
* Reset FCoE port statistics
*/
bfa_status_t
}
}
-/**
+/*
* Fetch port attributes.
*/
bfa_boolean_t
if (ioc_type == BFA_IOC_TYPE_FC) {
fcport->cfg.qos_enabled = on_off;
- /**
+ /*
* Notify fcpim of the change in QoS state
*/
bfa_fcpim_update_ioredirect(bfa);
fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
}
-/**
+/*
* Configure default minimum ratelim speed
*/
bfa_status_t
return BFA_STATUS_OK;
}
-/**
+/*
* Get default minimum ratelim speed
*/
enum bfa_port_speed
}
-/**
+/*
* Rport State machine functions
*/
-/**
+/*
* Beginning state, only online event expected.
*/
static void
}
}
-/**
+/*
* Waiting for rport create response from firmware.
*/
static void
}
}
-/**
+/*
* Request queue is full, awaiting queue resume to send create request.
*/
static void
}
}
-/**
+/*
* Online state - normal parking state.
*/
static void
}
}
-/**
+/*
* Firmware rport is being deleted - awaiting f/w response.
*/
static void
}
}
-/**
+/*
* Offline state.
*/
static void
}
}
-/**
+/*
* Rport is deleted, waiting for firmware response to delete.
*/
static void
}
}
-/**
+/*
* Waiting for rport create response from firmware. A delete is pending.
*/
static void
}
}
-/**
+/*
* Waiting for rport create response from firmware. Rport offline is pending.
*/
static void
}
}
-/**
+/*
* IOC h/w failed.
*/
static void
-/**
+/*
* bfa_rport_private BFA rport private functions
*/
rp->rport_tag = i;
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
- /**
+ /*
* - is unused
*/
if (i)
bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
}
- /**
+ /*
* consume memory
*/
bfa_meminfo_kva(meminfo) = (u8 *) rp;
{
struct bfi_rport_create_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
m->vf_id = rp->rport_info.vf_id;
m->cisc = rp->rport_info.cisc;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
{
struct bfi_rport_delete_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
bfa_lpuid(rp->bfa));
m->fw_handle = rp->fw_handle;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
{
struct bfa_rport_speed_req_s *m;
- /**
+ /*
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
m->fw_handle = rp->fw_handle;
m->speed = (u8)rp->rport_info.speed;
- /**
+ /*
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
-/**
+/*
* bfa_rport_public
*/
-/**
+/*
* Rport interrupt processing.
*/
void
-/**
+/*
* bfa_rport_api
*/
{
bfa_assert(rport_info->max_frmsz != 0);
- /**
+ /*
* Some JBODs are seen to be not setting PDU size correctly in PLOGI
* responses. Default to minimum size.
*/
}
-/**
+/*
* SGPG related functions
*/
-/**
+/*
* Compute and return memory needed by FCP(im) module.
*/
static void
-/**
+/*
* hal_sgpg_public BFA SGPG public functions
*/
if (list_empty(&mod->sgpg_wait_q))
return;
- /**
+ /*
* satisfy as many waiting requests as possible
*/
do {
wqe->nsgpg_total = wqe->nsgpg = nsgpg;
- /**
+ /*
* allocate any left to this one first
*/
if (mod->free_sgpgs) {
- /**
+ /*
* no one else is waiting for SGPG
*/
bfa_assert(list_empty(&mod->sgpg_wait_q));
wqe->cbarg = cbarg;
}
-/**
+/*
* UF related functions
*/
/*
bfa_sge_to_be(&sge[1]);
}
- /**
+ /*
* advance pointer beyond consumed memory
*/
bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
list_add_tail(&uf->qe, &ufm->uf_free_q);
}
- /**
+ /*
* advance memory pointer
*/
bfa_meminfo_kva(mi) = (u8 *) uf;
-/**
+/*
* hal_uf_api
*/
-/**
+/*
* Register handler for all unsolicited receive frames.
*
* @param[in] bfa BFA instance
ufm->cbarg = cbarg;
}
-/**
+/*
* Free an unsolicited frame back to BFA.
*
* @param[in] uf unsolicited frame to be freed
-/**
+/*
* uf_pub BFA uf module public functions
*/
void
* General Public License for more details.
*/
-/**
+/*
* bfad.c Linux driver PCI interface module.
*/
#include <linux/module.h>
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
-/**
+/*
* Beginning state for the driver instance, awaiting the pci_probe event
*/
static void
}
}
-/**
+/*
* Driver Instance is created, awaiting event INIT to initialize the bfad
*/
static void
}
}
-/**
+/*
* BFA callbacks
*/
void
complete(&fcomp->comp);
}
-/**
+/*
* bfa_init callback
*/
void
complete(&bfad->comp);
}
-/**
+/*
* BFA_FCS callbacks
*/
struct bfad_port_s *
}
}
-/**
+/*
* FCS RPORT alloc callback, after successful PLOGI by FCS
*/
bfa_status_t
return rc;
}
-/**
+/*
* FCS PBC VPORT Create
*/
void
return rc;
}
-/**
+/*
* Create a vport under a vf.
*/
bfa_status_t
return 0;
}
-/**
+/*
* BFA driver interrupt functions
*/
irqreturn_t
return IRQ_HANDLED;
}
-/**
+/*
* Initialize the MSIX entry table.
*/
static void
return 0;
}
-/**
+/*
* Setup MSIX based interrupt.
*/
int
}
}
-/**
+/*
* PCI probe entry.
*/
int
return error;
}
-/**
+/*
* PCI remove entry.
*/
void
.remove = __devexit_p(bfad_pci_remove),
};
-/**
+/*
* Driver module init.
*/
static int __init
return error;
}
-/**
+/*
* Driver module exit.
*/
static void __exit
* General Public License for more details.
*/
-/**
+/*
* bfa_attr.c Linux driver configuration interface module.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
-/**
+/*
* FC transport template entry, get SCSI target port ID.
*/
void
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI target nwwn.
*/
void
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI target pwwn.
*/
void
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, get SCSI host port ID.
*/
void
bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}
-/**
+/*
* FC transport template entry, get SCSI host port type.
*/
static void
}
}
-/**
+/*
* FC transport template entry, get SCSI host port state.
*/
static void
}
}
-/**
+/*
* FC transport template entry, get SCSI host active fc4s.
*/
static void
fc_host_active_fc4s(shost)[7] = 1;
}
-/**
+/*
* FC transport template entry, get SCSI host link speed.
*/
static void
}
}
-/**
+/*
* FC transport template entry, get SCSI host port type.
*/
static void
}
-/**
+/*
* FC transport template entry, get BFAD statistics.
*/
static struct fc_host_statistics *
return hstats;
}
-/**
+/*
* FC transport template entry, reset BFAD statistics.
*/
static void
return;
}
-/**
+/*
* FC transport template entry, get rport loss timeout.
*/
static void
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* FC transport template entry, set rport loss timeout.
*/
static void
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
-/**
+/*
* Scsi_Host_attrs SCSI host attributes
*/
static ssize_t
* General Public License for more details.
*/
-/**
+/*
* bfad_im.c Linux driver IM module.
*/
wake_up(wq);
}
-/**
+/*
* Scsi_Host_template SCSI host template
*/
-/**
+/*
* Scsi_Host template entry, returns BFAD PCI info.
*/
static const char *
return bfa_buf;
}
-/**
+/*
* Scsi_Host template entry, aborts the specified SCSI command.
*
* Returns: SUCCESS or FAILED.
return rc;
}
-/**
+/*
* Scsi_Host template entry, resets a LUN and aborts all its commands.
*
* Returns: SUCCESS or FAILED.
goto out;
}
- /**
+ /*
* Set host_scribble to NULL to avoid aborting a task command
* if one happens.
*/
return rc;
}
-/**
+/*
* Scsi_Host template entry, resets the bus and aborts all commands.
*/
static int
return SUCCESS;
}
-/**
+/*
* Scsi_Host template entry slave_destroy.
*/
static void
return;
}
-/**
+/*
* BFA FCS itnim callbacks
*/
-/**
+/*
* BFA FCS itnim alloc callback, after successful PRLI
* Context: Interrupt
*/
bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
-/**
+/*
* BFA FCS itnim free callback.
* Context: Interrupt. bfad_lock is held
*/
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* BFA FCS itnim online callback.
* Context: Interrupt. bfad_lock is held
*/
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* BFA FCS itnim offline callback.
* Context: Interrupt. bfad_lock is held
*/
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
-/**
+/*
* Allocate a Scsi_Host for a port.
*/
int
return BFA_STATUS_OK;
}
-/**
+/*
* Scsi_Host template entry.
*
* Description:
return NULL;
}
-/**
+/*
* Scsi_Host template entry slave_alloc
*/
static int
sprintf(fc_host_symbolic_name(host), "%s", symname);
fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
- fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
+ fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}
static void
return;
}
-/**
+/*
* Work queue handler using FC transport service
* Context: kernel
*/
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
-/**
+/*
* Scsi_Host template entry, queues a SCSI command to the BFAD.
*/
static int