1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
30
31 #include "lpfc_hw4.h"
32 #include "lpfc_hw.h"
33 #include "lpfc_sli.h"
34 #include "lpfc_sli4.h"
35 #include "lpfc_nl.h"
36 #include "lpfc_disc.h"
37 #include "lpfc_scsi.h"
38 #include "lpfc.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_crtn.h"
41 #include "lpfc_vport.h"
42 #include "lpfc_debugfs.h"
43
44 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
45 struct lpfc_iocbq *);
46 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
47 struct lpfc_iocbq *);
48 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
49 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
50 struct lpfc_nodelist *ndlp, uint8_t retry);
51 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
52 struct lpfc_iocbq *iocb);
53
54 static int lpfc_max_els_tries = 3;
55
56 /**
57 * lpfc_els_chk_latt - Check host link attention event for a vport
58 * @vport: pointer to a host virtual N_Port data structure.
59 *
60 * This routine checks whether there is an outstanding host link
61 * attention event during the discovery process with the @vport. It is done
 62  * by reading the HBA's Host Attention (HA) register. If there are any host
 63  * link attention events during this @vport's discovery process, the @vport
64 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
65 * be issued if the link state is not already in host link cleared state,
66 * and a return code shall indicate whether the host link attention event
67 * had happened.
68 *
 69  * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
 70  * state is LPFC_VPORT_READY, the request for checking the host link attention
 71  * event will be ignored and a return code shall indicate that no host link
 72  * attention event has happened.
73 *
74 * Return codes
75 * 0 - no host link attention event happened
76 * 1 - host link attention event happened
77 **/
78 int
79 lpfc_els_chk_latt(struct lpfc_vport *vport)
80 {
81 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
82 struct lpfc_hba *phba = vport->phba;
83 uint32_t ha_copy;
84
85 if (vport->port_state >= LPFC_VPORT_READY ||
86 phba->link_state == LPFC_LINK_DOWN ||
87 phba->sli_rev > LPFC_SLI_REV3)
88 return 0;
89
90 /* Read the HBA Host Attention Register */
91 ha_copy = readl(phba->HAregaddr);
92
93 if (!(ha_copy & HA_LATT))
94 return 0;
95
96 /* Pending Link Event during Discovery */
97 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
98 "0237 Pending Link Event during "
99 "Discovery: State x%x\n",
100 phba->pport->port_state);
101
102 /* CLEAR_LA should re-enable link attention events and
 103 	 * we should then immediately take a LATT event. The
104 * LATT processing should call lpfc_linkdown() which
105 * will cleanup any left over in-progress discovery
106 * events.
107 */
108 spin_lock_irq(shost->host_lock);
109 vport->fc_flag |= FC_ABORT_DISCOVERY;
110 spin_unlock_irq(shost->host_lock);
111
112 if (phba->link_state != LPFC_CLEAR_LA)
113 lpfc_issue_clear_la(phba, vport);
114
115 return 1;
116 }
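/*
 * Illustrative usage sketch (not part of the driver): the typical
 * caller-side pattern for lpfc_els_chk_latt(), as used by the ELS
 * completion handlers later in this file (e.g. lpfc_cmpl_els_flogi(),
 * lpfc_cmpl_els_plogi()). The my_els_cmpl() name is hypothetical.
 *
 *	static void my_els_cmpl(struct lpfc_hba *phba,
 *				struct lpfc_iocbq *cmdiocb,
 *				struct lpfc_iocbq *rspiocb)
 *	{
 *		struct lpfc_vport *vport = cmdiocb->vport;
 *
 *		// If the link dropped during discovery, skip normal
 *		// completion handling; LATT processing will restart
 *		// discovery via lpfc_linkdown().
 *		if (lpfc_els_chk_latt(vport))
 *			goto out;
 *
 *		... normal completion processing ...
 *	out:
 *		lpfc_els_free_iocb(phba, cmdiocb);
 *	}
 */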
117
118 /**
119 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
120 * @vport: pointer to a host virtual N_Port data structure.
121 * @expectRsp: flag indicating whether response is expected.
122 * @cmdSize: size of the ELS command.
123 * @retry: number of retries to the command IOCB when it fails.
124 * @ndlp: pointer to a node-list data structure.
125 * @did: destination identifier.
126 * @elscmd: the ELS command code.
127 *
 128  * This routine allocates a lpfc-IOCB data structure from the driver's
 129  * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
 130  * so that the discovery state machine can issue an Extended Link Service
 131  * (ELS) command. It is a generic lpfc-IOCB allocation and preparation
 132  * routine that is used by all the discovery state machine routines; the
 133  * ELS command-specific fields will later be set up by the individual
 134  * discovery routines after this routine has allocated and prepared the
 135  * generic IOCB data structure. It fills in the
136 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
137 * payload and response payload (if expected). The reference count on the
138 * ndlp is incremented by 1 and the reference to the ndlp is put into
139 * context1 of the IOCB data structure for this IOCB to hold the ndlp
140 * reference for the command's callback function to access later.
141 *
142 * Return code
143 * Pointer to the newly allocated/prepared els iocb data structure
144 * NULL - when els iocb data structure allocation/preparation failed
145 **/
146 struct lpfc_iocbq *
147 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
148 uint16_t cmdSize, uint8_t retry,
149 struct lpfc_nodelist *ndlp, uint32_t did,
150 uint32_t elscmd)
151 {
152 struct lpfc_hba *phba = vport->phba;
153 struct lpfc_iocbq *elsiocb;
154 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
155 struct ulp_bde64 *bpl;
156 IOCB_t *icmd;
157
158
159 if (!lpfc_is_link_up(phba))
160 return NULL;
161
162 /* Allocate buffer for command iocb */
163 elsiocb = lpfc_sli_get_iocbq(phba);
164
165 if (elsiocb == NULL)
166 return NULL;
167
168 /*
169 * If this command is for fabric controller and HBA running
170 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
171 */
172 if ((did == Fabric_DID) &&
173 (phba->hba_flag & HBA_FIP_SUPPORT) &&
174 ((elscmd == ELS_CMD_FLOGI) ||
175 (elscmd == ELS_CMD_FDISC) ||
176 (elscmd == ELS_CMD_LOGO)))
177 switch (elscmd) {
178 case ELS_CMD_FLOGI:
179 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
180 & LPFC_FIP_ELS_ID_MASK);
181 break;
182 case ELS_CMD_FDISC:
183 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
184 & LPFC_FIP_ELS_ID_MASK);
185 break;
186 case ELS_CMD_LOGO:
187 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
188 & LPFC_FIP_ELS_ID_MASK);
189 break;
190 }
191 else
192 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
193
194 icmd = &elsiocb->iocb;
195
196 /* fill in BDEs for command */
197 /* Allocate buffer for command payload */
198 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
199 if (pcmd)
200 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
201 if (!pcmd || !pcmd->virt)
202 goto els_iocb_free_pcmb_exit;
203
204 INIT_LIST_HEAD(&pcmd->list);
205
206 /* Allocate buffer for response payload */
207 if (expectRsp) {
208 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
209 if (prsp)
210 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
211 &prsp->phys);
212 if (!prsp || !prsp->virt)
213 goto els_iocb_free_prsp_exit;
214 INIT_LIST_HEAD(&prsp->list);
215 } else
216 prsp = NULL;
217
218 /* Allocate buffer for Buffer ptr list */
219 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
220 if (pbuflist)
221 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
222 &pbuflist->phys);
223 if (!pbuflist || !pbuflist->virt)
224 goto els_iocb_free_pbuf_exit;
225
226 INIT_LIST_HEAD(&pbuflist->list);
227
228 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
229 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
230 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
231 icmd->un.elsreq64.remoteID = did; /* DID */
232 if (expectRsp) {
233 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
234 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
235 icmd->ulpTimeout = phba->fc_ratov * 2;
236 } else {
237 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
238 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
239 }
240 icmd->ulpBdeCount = 1;
241 icmd->ulpLe = 1;
242 icmd->ulpClass = CLASS3;
243
244 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
245 icmd->un.elsreq64.myID = vport->fc_myDID;
246
247 /* For ELS_REQUEST64_CR, use the VPI by default */
248 icmd->ulpContext = vport->vpi + phba->vpi_base;
249 icmd->ulpCt_h = 0;
250 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
251 if (elscmd == ELS_CMD_ECHO)
252 icmd->ulpCt_l = 0; /* context = invalid RPI */
253 else
254 icmd->ulpCt_l = 1; /* context = VPI */
255 }
256
257 bpl = (struct ulp_bde64 *) pbuflist->virt;
258 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
259 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
260 bpl->tus.f.bdeSize = cmdSize;
261 bpl->tus.f.bdeFlags = 0;
262 bpl->tus.w = le32_to_cpu(bpl->tus.w);
263
264 if (expectRsp) {
265 bpl++;
266 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
267 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
268 bpl->tus.f.bdeSize = FCELSSIZE;
269 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
270 bpl->tus.w = le32_to_cpu(bpl->tus.w);
271 }
272
273 /* prevent preparing iocb with NULL ndlp reference */
274 elsiocb->context1 = lpfc_nlp_get(ndlp);
275 if (!elsiocb->context1)
276 goto els_iocb_free_pbuf_exit;
277 elsiocb->context2 = pcmd;
278 elsiocb->context3 = pbuflist;
279 elsiocb->retry = retry;
280 elsiocb->vport = vport;
281 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
282
283 if (prsp) {
284 list_add(&prsp->list, &pcmd->list);
285 }
286 if (expectRsp) {
287 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
288 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
289 "0116 Xmit ELS command x%x to remote "
290 "NPORT x%x I/O tag: x%x, port state: x%x\n",
291 elscmd, did, elsiocb->iotag,
292 vport->port_state);
293 } else {
294 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
295 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
296 "0117 Xmit ELS response x%x to remote "
297 "NPORT x%x I/O tag: x%x, size: x%x\n",
298 elscmd, ndlp->nlp_DID, elsiocb->iotag,
299 cmdSize);
300 }
301 return elsiocb;
302
303 els_iocb_free_pbuf_exit:
304 if (expectRsp)
305 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
306 kfree(pbuflist);
307
308 els_iocb_free_prsp_exit:
309 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
310 kfree(prsp);
311
312 els_iocb_free_pcmb_exit:
313 kfree(pcmd);
314 lpfc_sli_release_iocbq(phba, elsiocb);
315 return NULL;
316 }
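/*
 * Usage sketch (illustration only): how the discovery routines below use
 * lpfc_prep_els_iocb(), mirroring lpfc_issue_els_plogi(). The command
 * chosen (ELS_CMD_PLOGI) is just an example; error handling is reduced
 * to the essentials.
 *
 *	cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_PLOGI);
 *	if (!elsiocb)
 *		return 1;
 *
 *	// Fill the command payload through the context2 dma buffer.
 *	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 *	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
 *	pcmd += sizeof(uint32_t);
 *	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 *
 *	// Hook the completion handler and hand the iocb to the SLI layer.
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) {
 *		// Frees the buffers and drops the ndlp reference taken above.
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	return 0;
 */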
317
318 /**
319 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
320 * @vport: pointer to a host virtual N_Port data structure.
321 *
322 * This routine issues a fabric registration login for a @vport. An
323 * active ndlp node with Fabric_DID must already exist for this @vport.
324 * The routine invokes two mailbox commands to carry out fabric registration
325 * login through the HBA firmware: the first mailbox command requests the
326 * HBA to perform link configuration for the @vport; and the second mailbox
327 * command requests the HBA to perform the actual fabric registration login
328 * with the @vport.
329 *
330 * Return code
331 * 0 - successfully issued fabric registration login for @vport
332 * -ENXIO -- failed to issue fabric registration login for @vport
333 **/
334 int
335 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
336 {
337 struct lpfc_hba *phba = vport->phba;
338 LPFC_MBOXQ_t *mbox;
339 struct lpfc_dmabuf *mp;
340 struct lpfc_nodelist *ndlp;
341 struct serv_parm *sp;
342 int rc;
343 int err = 0;
344
345 sp = &phba->fc_fabparam;
346 ndlp = lpfc_findnode_did(vport, Fabric_DID);
347 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
348 err = 1;
349 goto fail;
350 }
351
352 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
353 if (!mbox) {
354 err = 2;
355 goto fail;
356 }
357
358 vport->port_state = LPFC_FABRIC_CFG_LINK;
359 lpfc_config_link(phba, mbox);
360 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
361 mbox->vport = vport;
362
363 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
364 if (rc == MBX_NOT_FINISHED) {
365 err = 3;
366 goto fail_free_mbox;
367 }
368
369 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
370 if (!mbox) {
371 err = 4;
372 goto fail;
373 }
374 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
375 if (rc) {
376 err = 5;
377 goto fail_free_mbox;
378 }
379
380 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
381 mbox->vport = vport;
382 /* increment the reference count on ndlp to hold reference
383 * for the callback routine.
384 */
385 mbox->context2 = lpfc_nlp_get(ndlp);
386
387 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
388 if (rc == MBX_NOT_FINISHED) {
389 err = 6;
390 goto fail_issue_reg_login;
391 }
392
393 return 0;
394
395 fail_issue_reg_login:
396 /* decrement the reference count on ndlp just incremented
397 * for the failed mbox command.
398 */
399 lpfc_nlp_put(ndlp);
400 mp = (struct lpfc_dmabuf *) mbox->context1;
401 lpfc_mbuf_free(phba, mp->virt, mp->phys);
402 kfree(mp);
403 fail_free_mbox:
404 mempool_free(mbox, phba->mbox_mem_pool);
405
406 fail:
407 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
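	/* The err value logged below identifies the failure point:
	 * 1 - no active Fabric_DID ndlp, 2/4 - mailbox allocation failed,
	 * 3/6 - lpfc_sli_issue_mbox() returned MBX_NOT_FINISHED,
	 * 5 - lpfc_reg_rpi() failed.
	 */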
408 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
409 "0249 Cannot issue Register Fabric login: Err %d\n", err);
410 return -ENXIO;
411 }
412
413 /**
414 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
415 * @vport: pointer to a host virtual N_Port data structure.
416 *
417 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
418 * the @vport. This mailbox command is necessary for FCoE only.
419 *
420 * Return code
421 * 0 - successfully issued REG_VFI for @vport
422 * A failure code otherwise.
423 **/
424 static int
425 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
426 {
427 struct lpfc_hba *phba = vport->phba;
428 LPFC_MBOXQ_t *mboxq;
429 struct lpfc_nodelist *ndlp;
430 struct serv_parm *sp;
431 struct lpfc_dmabuf *dmabuf;
432 int rc = 0;
433
434 sp = &phba->fc_fabparam;
435 ndlp = lpfc_findnode_did(vport, Fabric_DID);
436 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
437 rc = -ENODEV;
438 goto fail;
439 }
440
441 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
442 if (!dmabuf) {
443 rc = -ENOMEM;
444 goto fail;
445 }
446 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
447 if (!dmabuf->virt) {
448 rc = -ENOMEM;
449 goto fail_free_dmabuf;
450 }
451 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
452 if (!mboxq) {
453 rc = -ENOMEM;
454 goto fail_free_coherent;
455 }
456 vport->port_state = LPFC_FABRIC_CFG_LINK;
457 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
458 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
459 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
460 mboxq->vport = vport;
461 mboxq->context1 = dmabuf;
462 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
463 if (rc == MBX_NOT_FINISHED) {
464 rc = -ENXIO;
465 goto fail_free_mbox;
466 }
467 return 0;
468
469 fail_free_mbox:
470 mempool_free(mboxq, phba->mbox_mem_pool);
471 fail_free_coherent:
472 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
473 fail_free_dmabuf:
474 kfree(dmabuf);
475 fail:
476 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
477 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
478 "0289 Issue Register VFI failed: Err %d\n", rc);
479 return rc;
480 }
481
482 /**
483 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
484 * @vport: pointer to a host virtual N_Port data structure.
485 * @ndlp: pointer to a node-list data structure.
486 * @sp: pointer to service parameter data structure.
487 * @irsp: pointer to the IOCB within the lpfc response IOCB.
488 *
489 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
490 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
491 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 492  * from the IOCB response. It also checks the newly assigned N_Port ID of the
493 * @vport against the previously assigned N_Port ID. If it is different from
494 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
495 * is invoked on all the remaining nodes with the @vport to unregister the
496 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
497 * is invoked to register login to the fabric.
498 *
499 * Return code
500 * 0 - Success (currently, always return 0)
501 **/
502 static int
503 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
504 struct serv_parm *sp, IOCB_t *irsp)
505 {
506 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
507 struct lpfc_hba *phba = vport->phba;
508 struct lpfc_nodelist *np;
509 struct lpfc_nodelist *next_np;
510
511 spin_lock_irq(shost->host_lock);
512 vport->fc_flag |= FC_FABRIC;
513 spin_unlock_irq(shost->host_lock);
514
515 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
516 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
517 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
518
519 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
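	/* Worked example of the conversions above (values illustrative):
	 * with edtovResolution set, an E_D_TOV of 2,000,000 nanosecond
	 * ticks is stored as 2 ms; an R_A_TOV of 10,000 ms is stored as
	 * 10 seconds.
	 */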
520
521 if (phba->fc_topology == TOPOLOGY_LOOP) {
522 spin_lock_irq(shost->host_lock);
523 vport->fc_flag |= FC_PUBLIC_LOOP;
524 spin_unlock_irq(shost->host_lock);
525 } else {
526 /*
 527 		 * If we are an N_Port connected to a Fabric, fix up sparams so
 528 		 * logins to devices on remote loops work.
529 */
530 vport->fc_sparam.cmn.altBbCredit = 1;
531 }
532
533 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
534 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
535 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
536 ndlp->nlp_class_sup = 0;
537 if (sp->cls1.classValid)
538 ndlp->nlp_class_sup |= FC_COS_CLASS1;
539 if (sp->cls2.classValid)
540 ndlp->nlp_class_sup |= FC_COS_CLASS2;
541 if (sp->cls3.classValid)
542 ndlp->nlp_class_sup |= FC_COS_CLASS3;
543 if (sp->cls4.classValid)
544 ndlp->nlp_class_sup |= FC_COS_CLASS4;
545 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
546 sp->cmn.bbRcvSizeLsb;
547 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
548
549 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
550 if (sp->cmn.response_multiple_NPort) {
551 lpfc_printf_vlog(vport, KERN_WARNING,
552 LOG_ELS | LOG_VPORT,
553 "1816 FLOGI NPIV supported, "
554 "response data 0x%x\n",
555 sp->cmn.response_multiple_NPort);
556 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
557 } else {
558 /* Because we asked f/w for NPIV it still expects us
 559 			   to call reg_vnpid at least for the physical host */
560 lpfc_printf_vlog(vport, KERN_WARNING,
561 LOG_ELS | LOG_VPORT,
562 "1817 Fabric does not support NPIV "
563 "- configuring single port mode.\n");
564 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
565 }
566 }
567
568 if ((vport->fc_prevDID != vport->fc_myDID) &&
569 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
570
571 /* If our NportID changed, we need to ensure all
572 * remaining NPORTs get unreg_login'ed.
573 */
574 list_for_each_entry_safe(np, next_np,
575 &vport->fc_nodes, nlp_listp) {
576 if (!NLP_CHK_NODE_ACT(np))
577 continue;
578 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
579 !(np->nlp_flag & NLP_NPR_ADISC))
580 continue;
581 spin_lock_irq(shost->host_lock);
582 np->nlp_flag &= ~NLP_NPR_ADISC;
583 spin_unlock_irq(shost->host_lock);
584 lpfc_unreg_rpi(vport, np);
585 }
586 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
587 lpfc_mbx_unreg_vpi(vport);
588 spin_lock_irq(shost->host_lock);
589 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
590 spin_unlock_irq(shost->host_lock);
591 }
592 /*
 593 		 * If the VPI is unregistered, the driver needs to do INIT_VPI
 594 		 * before re-registering
595 */
596 if (phba->sli_rev == LPFC_SLI_REV4) {
597 spin_lock_irq(shost->host_lock);
598 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
599 spin_unlock_irq(shost->host_lock);
600 }
601 }
602
603 if (phba->sli_rev < LPFC_SLI_REV4) {
604 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
605 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
606 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
607 lpfc_register_new_vport(phba, vport, ndlp);
608 else
609 lpfc_issue_fabric_reglogin(vport);
610 } else {
611 ndlp->nlp_type |= NLP_FABRIC;
612 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
613 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
614 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
615 lpfc_start_fdiscs(phba);
616 lpfc_do_scr_ns_plogi(phba, vport);
617 } else if (vport->fc_flag & FC_VFI_REGISTERED)
618 lpfc_issue_init_vpi(vport);
619 else
620 lpfc_issue_reg_vfi(vport);
621 }
622 return 0;
623 }
624 /**
625 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
626 * @vport: pointer to a host virtual N_Port data structure.
627 * @ndlp: pointer to a node-list data structure.
628 * @sp: pointer to service parameter data structure.
629 *
630 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
631 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
632 * in a point-to-point topology. First, the @vport's N_Port Name is compared
633 * with the received N_Port Name: if the @vport's N_Port Name is greater than
634 * the received N_Port Name lexicographically, this node shall assign local
635 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
636 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
637 * this node shall just wait for the remote node to issue PLOGI and assign
638 * N_Port IDs.
639 *
640 * Return code
641 * 0 - Success
642 * -ENXIO - Fail
643 **/
644 static int
645 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
646 struct serv_parm *sp)
647 {
648 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
649 struct lpfc_hba *phba = vport->phba;
650 LPFC_MBOXQ_t *mbox;
651 int rc;
652
653 spin_lock_irq(shost->host_lock);
654 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
655 spin_unlock_irq(shost->host_lock);
656
657 phba->fc_edtov = FF_DEF_EDTOV;
658 phba->fc_ratov = FF_DEF_RATOV;
659 rc = memcmp(&vport->fc_portname, &sp->portName,
660 sizeof(vport->fc_portname));
661 if (rc >= 0) {
662 /* This side will initiate the PLOGI */
663 spin_lock_irq(shost->host_lock);
664 vport->fc_flag |= FC_PT2PT_PLOGI;
665 spin_unlock_irq(shost->host_lock);
666
667 /*
 668 		 * N_Port ID cannot be 0, set ours to LocalID; the other
 669 		 * side will be RemoteID.
670 */
671
672 /* not equal */
673 if (rc)
674 vport->fc_myDID = PT2PT_LocalID;
675
676 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
677 if (!mbox)
678 goto fail;
679
680 lpfc_config_link(phba, mbox);
681
682 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
683 mbox->vport = vport;
684 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
685 if (rc == MBX_NOT_FINISHED) {
686 mempool_free(mbox, phba->mbox_mem_pool);
687 goto fail;
688 }
689 /* Decrement ndlp reference count indicating that ndlp can be
690 * safely released when other references to it are done.
691 */
692 lpfc_nlp_put(ndlp);
693
694 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
695 if (!ndlp) {
696 /*
697 * Cannot find existing Fabric ndlp, so allocate a
698 * new one
699 */
700 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
701 if (!ndlp)
702 goto fail;
703 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
704 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
705 ndlp = lpfc_enable_node(vport, ndlp,
706 NLP_STE_UNUSED_NODE);
 707 			if (!ndlp)
708 goto fail;
709 }
710
711 memcpy(&ndlp->nlp_portname, &sp->portName,
712 sizeof(struct lpfc_name));
713 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
714 sizeof(struct lpfc_name));
715 /* Set state will put ndlp onto node list if not already done */
716 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
717 spin_lock_irq(shost->host_lock);
718 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
719 spin_unlock_irq(shost->host_lock);
720 } else
721 /* This side will wait for the PLOGI, decrement ndlp reference
722 * count indicating that ndlp can be released when other
723 * references to it are done.
724 */
725 lpfc_nlp_put(ndlp);
726
727 /* If we are pt2pt with another NPort, force NPIV off! */
728 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
729
730 spin_lock_irq(shost->host_lock);
731 vport->fc_flag |= FC_PT2PT;
732 spin_unlock_irq(shost->host_lock);
733
734 /* Start discovery - this should just do CLEAR_LA */
735 lpfc_disc_start(vport);
736 return 0;
737 fail:
738 return -ENXIO;
739 }
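/*
 * Illustration of the pt2pt decision above (WWPNs are example values):
 * memcmp() compares the two port names byte by byte, so the side with the
 * numerically larger WWPN initiates the PLOGI. If the local port name is
 * 20:00:00:c0:dd:01:02:03 and the received port name is
 * 10:00:00:c0:dd:0a:0b:0c, rc > 0, so this side takes PT2PT_LocalID (1),
 * creates or looks up an ndlp for PT2PT_RemoteID (2) and sends the PLOGI.
 * With the names swapped, rc < 0 and this side simply waits for the
 * remote node's PLOGI.
 */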
740
741 /**
742 * lpfc_cmpl_els_flogi - Completion callback function for flogi
743 * @phba: pointer to lpfc hba data structure.
744 * @cmdiocb: pointer to lpfc command iocb data structure.
745 * @rspiocb: pointer to lpfc response iocb data structure.
746 *
747 * This routine is the top-level completion callback function for issuing
748 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
749 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
750 * retry has been made (either immediately or delayed with lpfc_els_retry()
751 * returning 1), the command IOCB will be released and function returned.
 752  * If the retry attempt has been given up (possibly having reached the
 753  * maximum number of retries), one additional decrement of the ndlp
 754  * reference count shall be made before releasing the command IOCB. This will
755 * actually release the remote node (Note, lpfc_els_free_iocb() will also
756 * invoke one decrement of ndlp reference count). If no error reported in
757 * the IOCB status, the command Port ID field is used to determine whether
758 * this is a point-to-point topology or a fabric topology: if the Port ID
759 * field is assigned, it is a fabric topology; otherwise, it is a
760 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
761 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
762 * specific topology completion conditions.
763 **/
764 static void
765 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
766 struct lpfc_iocbq *rspiocb)
767 {
768 struct lpfc_vport *vport = cmdiocb->vport;
769 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
770 IOCB_t *irsp = &rspiocb->iocb;
771 struct lpfc_nodelist *ndlp = cmdiocb->context1;
772 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
773 struct serv_parm *sp;
774 uint16_t fcf_index;
775 int rc;
776
777 /* Check to see if link went down during discovery */
778 if (lpfc_els_chk_latt(vport)) {
779 /* One additional decrement on node reference count to
780 * trigger the release of the node
781 */
782 lpfc_nlp_put(ndlp);
783 goto out;
784 }
785
786 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
787 "FLOGI cmpl: status:x%x/x%x state:x%x",
788 irsp->ulpStatus, irsp->un.ulpWord[4],
789 vport->port_state);
790
791 if (irsp->ulpStatus) {
792 /*
793 * In case of FIP mode, perform round robin FCF failover
794 * due to new FCF discovery
795 */
796 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
797 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
798 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
799 "2611 FLOGI failed on registered "
800 "FCF record fcf_index:%d, trying "
801 "to perform round robin failover\n",
802 phba->fcf.current_rec.fcf_indx);
803 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
804 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
805 /*
806 * Exhausted the eligible FCF record list,
807 * fail through to retry FLOGI on current
808 * FCF record.
809 */
810 lpfc_printf_log(phba, KERN_WARNING,
811 LOG_FIP | LOG_ELS,
812 "2760 FLOGI exhausted FCF "
813 "round robin failover list, "
814 "retry FLOGI on the current "
815 "registered FCF index:%d\n",
816 phba->fcf.current_rec.fcf_indx);
817 spin_lock_irq(&phba->hbalock);
818 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
819 spin_unlock_irq(&phba->hbalock);
820 } else {
821 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
822 fcf_index);
823 if (rc) {
824 lpfc_printf_log(phba, KERN_WARNING,
825 LOG_FIP | LOG_ELS,
826 "2761 FLOGI round "
827 "robin FCF failover "
828 "read FCF failed "
829 "rc:x%x, fcf_index:"
830 "%d\n", rc,
831 phba->fcf.current_rec.fcf_indx);
832 spin_lock_irq(&phba->hbalock);
833 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
834 spin_unlock_irq(&phba->hbalock);
835 } else
836 goto out;
837 }
838 }
839
840 /* Check for retry */
841 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
842 goto out;
843
844 /* FLOGI failed, so there is no fabric */
845 spin_lock_irq(shost->host_lock);
846 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
847 spin_unlock_irq(shost->host_lock);
848
849 /* If private loop, then allow max outstanding els to be
850 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
851 * alpa map would take too long otherwise.
852 */
853 if (phba->alpa_map[0] == 0) {
854 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
855 }
856
857 /* FLOGI failure */
858 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
859 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
860 irsp->ulpStatus, irsp->un.ulpWord[4],
861 irsp->ulpTimeout);
862 goto flogifail;
863 }
864 spin_lock_irq(shost->host_lock);
865 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
866 spin_unlock_irq(shost->host_lock);
867
868 /*
 869 	 * The FLOGI succeeded. Sync the data for the CPU before
870 * accessing it.
871 */
872 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
873
874 sp = prsp->virt + sizeof(uint32_t);
875
876 /* FLOGI completes successfully */
877 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
878 "0101 FLOGI completes successfully "
879 "Data: x%x x%x x%x x%x\n",
880 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
881 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
882
883 if (vport->port_state == LPFC_FLOGI) {
884 /*
885 * If Common Service Parameters indicate Nport
886 * we are point to point, if Fport we are Fabric.
887 */
888 if (sp->cmn.fPort)
889 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
890 else
891 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
892
893 if (!rc) {
894 /* Mark the FCF discovery process done */
895 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
896 "2769 FLOGI successful on FCF record: "
897 "current_fcf_index:x%x, terminate FCF "
898 "round robin failover process\n",
899 phba->fcf.current_rec.fcf_indx);
900 spin_lock_irq(&phba->hbalock);
901 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
902 spin_unlock_irq(&phba->hbalock);
903 goto out;
904 }
905 }
906
907 flogifail:
908 lpfc_nlp_put(ndlp);
909
910 if (!lpfc_error_lost_link(irsp)) {
911 /* FLOGI failed, so just use loop map to make discovery list */
912 lpfc_disc_list_loopmap(vport);
913
914 /* Start discovery */
915 lpfc_disc_start(vport);
916 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
917 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
918 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
919 (phba->link_state != LPFC_CLEAR_LA)) {
920 /* If FLOGI failed enable link interrupt. */
921 lpfc_issue_clear_la(phba, vport);
922 }
923 out:
924 lpfc_els_free_iocb(phba, cmdiocb);
925 }
926
927 /**
928 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
929 * @vport: pointer to a host virtual N_Port data structure.
930 * @ndlp: pointer to a node-list data structure.
931 * @retry: number of retries to the command IOCB.
932 *
933 * This routine issues a Fabric Login (FLOGI) Request ELS command
934 * for a @vport. The initiator service parameters are put into the payload
935 * of the FLOGI Request IOCB and the top-level callback function pointer
 936  * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion callback
 937  * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 938  * out the FLOGI ELS command, with only one fabric IOCB outstanding at a time.
939 *
940 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
941 * will be incremented by 1 for holding the ndlp and the reference to ndlp
942 * will be stored into the context1 field of the IOCB for the completion
943 * callback function to the FLOGI ELS command.
944 *
945 * Return code
946 * 0 - successfully issued flogi iocb for @vport
947 * 1 - failed to issue flogi iocb for @vport
948 **/
949 static int
950 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
951 uint8_t retry)
952 {
953 struct lpfc_hba *phba = vport->phba;
954 struct serv_parm *sp;
955 IOCB_t *icmd;
956 struct lpfc_iocbq *elsiocb;
957 struct lpfc_sli_ring *pring;
958 uint8_t *pcmd;
959 uint16_t cmdsize;
960 uint32_t tmo;
961 int rc;
962
963 pring = &phba->sli.ring[LPFC_ELS_RING];
964
965 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
966 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
967 ndlp->nlp_DID, ELS_CMD_FLOGI);
968
969 if (!elsiocb)
970 return 1;
971
972 icmd = &elsiocb->iocb;
973 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
974
975 /* For FLOGI request, remainder of payload is service parameters */
976 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
977 pcmd += sizeof(uint32_t);
978 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
979 sp = (struct serv_parm *) pcmd;
980
981 /* Setup CSPs accordingly for Fabric */
982 sp->cmn.e_d_tov = 0;
983 sp->cmn.w2.r_a_tov = 0;
984 sp->cls1.classValid = 0;
985 sp->cls2.seqDelivery = 1;
986 sp->cls3.seqDelivery = 1;
987 if (sp->cmn.fcphLow < FC_PH3)
988 sp->cmn.fcphLow = FC_PH3;
989 if (sp->cmn.fcphHigh < FC_PH3)
990 sp->cmn.fcphHigh = FC_PH3;
991
992 if (phba->sli_rev == LPFC_SLI_REV4) {
993 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
994 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
995 /* FLOGI needs to be 3 for WQE FCFI */
996 /* Set the fcfi to the fcfi we registered with */
997 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
998 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
999 sp->cmn.request_multiple_Nport = 1;
1000 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1001 icmd->ulpCt_h = 1;
1002 icmd->ulpCt_l = 0;
1003 }
1004
1005 if (phba->fc_topology != TOPOLOGY_LOOP) {
1006 icmd->un.elsreq64.myID = 0;
1007 icmd->un.elsreq64.fl = 1;
1008 }
1009
1010 tmo = phba->fc_ratov;
1011 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1012 lpfc_set_disctmo(vport);
1013 phba->fc_ratov = tmo;
1014
1015 phba->fc_stat.elsXmitFLOGI++;
1016 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1017
1018 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1019 "Issue FLOGI: opt:x%x",
1020 phba->sli3_options, 0, 0);
1021
1022 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1023 if (rc == IOCB_ERROR) {
1024 lpfc_els_free_iocb(phba, elsiocb);
1025 return 1;
1026 }
1027 return 0;
1028 }
1029
1030 /**
1031 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1032 * @phba: pointer to lpfc hba data structure.
1033 *
1034 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1035 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 1036  * list and issues an abort IOCB command on each outstanding IOCB that
 1037  * contains an active Fabric_DID ndlp. Note that this function only issues
1038 * the abort IOCB command on all the outstanding IOCBs, thus when this
1039 * function returns, it does not guarantee all the IOCBs are actually aborted.
1040 *
1041 * Return code
1042 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1043 **/
1044 int
1045 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1046 {
1047 struct lpfc_sli_ring *pring;
1048 struct lpfc_iocbq *iocb, *next_iocb;
1049 struct lpfc_nodelist *ndlp;
1050 IOCB_t *icmd;
1051
1052 /* Abort outstanding I/O on NPort <nlp_DID> */
1053 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1054 "0201 Abort outstanding I/O on NPort x%x\n",
1055 Fabric_DID);
1056
1057 pring = &phba->sli.ring[LPFC_ELS_RING];
1058
1059 /*
1060 * Check the txcmplq for an iocb that matches the nport the driver is
1061 * searching for.
1062 */
1063 spin_lock_irq(&phba->hbalock);
1064 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1065 icmd = &iocb->iocb;
1066 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
1067 icmd->un.elsreq64.bdl.ulpIoTag32) {
1068 ndlp = (struct lpfc_nodelist *)(iocb->context1);
1069 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1070 (ndlp->nlp_DID == Fabric_DID))
1071 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1072 }
1073 }
1074 spin_unlock_irq(&phba->hbalock);
1075
1076 return 0;
1077 }
1078
1079 /**
1080 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1081 * @vport: pointer to a host virtual N_Port data structure.
1082 *
1083 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 1084  * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) in
 1085  * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
 1086  * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1087 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1088 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1089 * @vport.
1090 *
1091 * Return code
1092 * 0 - failed to issue initial flogi for @vport
1093 * 1 - successfully issued initial flogi for @vport
1094 **/
1095 int
1096 lpfc_initial_flogi(struct lpfc_vport *vport)
1097 {
1098 struct lpfc_hba *phba = vport->phba;
1099 struct lpfc_nodelist *ndlp;
1100
1101 vport->port_state = LPFC_FLOGI;
1102 lpfc_set_disctmo(vport);
1103
1104 /* First look for the Fabric ndlp */
1105 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1106 if (!ndlp) {
1107 /* Cannot find existing Fabric ndlp, so allocate a new one */
1108 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1109 if (!ndlp)
1110 return 0;
1111 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1112 /* Set the node type */
1113 ndlp->nlp_type |= NLP_FABRIC;
1114 /* Put ndlp onto node list */
1115 lpfc_enqueue_node(vport, ndlp);
1116 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1117 /* re-setup ndlp without removing from node list */
1118 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1119 if (!ndlp)
1120 return 0;
1121 }
1122
1123 if (lpfc_issue_els_flogi(vport, ndlp, 0))
1124 /* This decrement of reference count to node shall kick off
1125 * the release of the node.
1126 */
1127 lpfc_nlp_put(ndlp);
1128
1129 return 1;
1130 }
1131
1132 /**
1133 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1134 * @vport: pointer to a host virtual N_Port data structure.
1135 *
1136 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 1137  * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) in
 1138  * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
 1139  * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1140 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1141 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1142 * @vport.
1143 *
1144 * Return code
1145 * 0 - failed to issue initial fdisc for @vport
1146 * 1 - successfully issued initial fdisc for @vport
1147 **/
1148 int
1149 lpfc_initial_fdisc(struct lpfc_vport *vport)
1150 {
1151 struct lpfc_hba *phba = vport->phba;
1152 struct lpfc_nodelist *ndlp;
1153
1154 /* First look for the Fabric ndlp */
1155 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1156 if (!ndlp) {
1157 /* Cannot find existing Fabric ndlp, so allocate a new one */
1158 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1159 if (!ndlp)
1160 return 0;
1161 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1162 /* Put ndlp onto node list */
1163 lpfc_enqueue_node(vport, ndlp);
1164 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1165 /* re-setup ndlp without removing from node list */
1166 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1167 if (!ndlp)
1168 return 0;
1169 }
1170
1171 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1172 /* decrement node reference count to trigger the release of
1173 * the node.
1174 */
1175 lpfc_nlp_put(ndlp);
1176 return 0;
1177 }
1178 return 1;
1179 }
1180
1181 /**
1182 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1183 * @vport: pointer to a host virtual N_Port data structure.
1184 *
1185 * This routine checks whether there are more remaining Port Logins
1186 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1187 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 1188  * to issue ELS PLOGIs up to the configured discovery threads for the
 1189  * @vport (@vport->cfg_discovery_threads). The function also decrements
 1190  * the @vport's num_disc_nodes by 1 if it is not already 0.
1191 **/
1192 void
1193 lpfc_more_plogi(struct lpfc_vport *vport)
1194 {
1195 int sentplogi;
1196
1197 if (vport->num_disc_nodes)
1198 vport->num_disc_nodes--;
1199
1200 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1201 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1202 "0232 Continue discovery with %d PLOGIs to go "
1203 "Data: x%x x%x x%x\n",
1204 vport->num_disc_nodes, vport->fc_plogi_cnt,
1205 vport->fc_flag, vport->port_state);
1206 /* Check to see if there are more PLOGIs to be sent */
1207 if (vport->fc_flag & FC_NLP_MORE)
1208 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1209 sentplogi = lpfc_els_disc_plogi(vport);
1210
1211 return;
1212 }
1213
1214 /**
 1215  * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1216 * @phba: pointer to lpfc hba data structure.
1217 * @prsp: pointer to response IOCB payload.
1218 * @ndlp: pointer to a node-list data structure.
1219 *
1220 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 1221  * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1222 * The following cases are considered N_Port confirmed:
1223 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1224 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1225 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1226 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1227 * 1) if there is a node on vport list other than the @ndlp with the same
1228 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1229 * on that node to release the RPI associated with the node; 2) if there is
1230 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1231 * into, a new node shall be allocated (or activated). In either case, the
1232 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1233 * be released and the new_ndlp shall be put on to the vport node list and
1234 * its pointer returned as the confirmed node.
1235 *
 1236  * Note that, before the @ndlp gets "released", the keepDID (the DID of the
 1237  * not-matching or inactive "new_ndlp" on the vport node list) is assigned to
 1238  * the nlp_DID of the @ndlp. This is because releasing the @ndlp actually puts it
1239 * into an inactive state on the vport node list and the vport node list
 1240  * management algorithm does not allow two nodes with the same DID.
1241 *
1242 * Return code
1243 * pointer to the PLOGI N_Port @ndlp
1244 **/
1245 static struct lpfc_nodelist *
1246 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1247 struct lpfc_nodelist *ndlp)
1248 {
1249 struct lpfc_vport *vport = ndlp->vport;
1250 struct lpfc_nodelist *new_ndlp;
1251 struct lpfc_rport_data *rdata;
1252 struct fc_rport *rport;
1253 struct serv_parm *sp;
1254 uint8_t name[sizeof(struct lpfc_name)];
1255 uint32_t rc, keepDID = 0;
1256
1257 /* Fabric nodes can have the same WWPN so we don't bother searching
1258 * by WWPN. Just return the ndlp that was given to us.
1259 */
1260 if (ndlp->nlp_type & NLP_FABRIC)
1261 return ndlp;
1262
1263 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1264 memset(name, 0, sizeof(struct lpfc_name));
1265
 1266 	/* Now we find out if the NPort we are logging into matches the WWPN
1267 * we have for that ndlp. If not, we have some work to do.
1268 */
1269 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1270
1271 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1272 return ndlp;
1273
1274 if (!new_ndlp) {
1275 rc = memcmp(&ndlp->nlp_portname, name,
1276 sizeof(struct lpfc_name));
1277 if (!rc)
1278 return ndlp;
1279 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1280 if (!new_ndlp)
1281 return ndlp;
1282 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1283 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1284 rc = memcmp(&ndlp->nlp_portname, name,
1285 sizeof(struct lpfc_name));
1286 if (!rc)
1287 return ndlp;
1288 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1289 NLP_STE_UNUSED_NODE);
1290 if (!new_ndlp)
1291 return ndlp;
1292 keepDID = new_ndlp->nlp_DID;
1293 } else
1294 keepDID = new_ndlp->nlp_DID;
1295
1296 lpfc_unreg_rpi(vport, new_ndlp);
1297 new_ndlp->nlp_DID = ndlp->nlp_DID;
1298 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1299
1300 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1301 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1302 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1303
1304 /* Set state will put new_ndlp on to node list if not already done */
1305 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1306
1307 /* Move this back to NPR state */
1308 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1309 /* The new_ndlp is replacing ndlp totally, so we need
1310 * to put ndlp on UNUSED list and try to free it.
1311 */
1312
1313 /* Fix up the rport accordingly */
1314 rport = ndlp->rport;
1315 if (rport) {
1316 rdata = rport->dd_data;
1317 if (rdata->pnode == ndlp) {
1318 lpfc_nlp_put(ndlp);
1319 ndlp->rport = NULL;
1320 rdata->pnode = lpfc_nlp_get(new_ndlp);
1321 new_ndlp->rport = rport;
1322 }
1323 new_ndlp->nlp_type = ndlp->nlp_type;
1324 }
 1325 		/* We shall actually free the ndlp when both its nlp_DID and
 1326 		 * nlp_portname fields equal 0, to avoid leaving an ndlp on the
 1327 		 * nodelist that can never be used.
1328 */
1329 if (ndlp->nlp_DID == 0) {
1330 spin_lock_irq(&phba->ndlp_lock);
1331 NLP_SET_FREE_REQ(ndlp);
1332 spin_unlock_irq(&phba->ndlp_lock);
1333 }
1334
1335 /* Two ndlps cannot have the same did on the nodelist */
1336 ndlp->nlp_DID = keepDID;
1337 lpfc_drop_node(vport, ndlp);
1338 }
1339 else {
1340 lpfc_unreg_rpi(vport, ndlp);
1341 /* Two ndlps cannot have the same did */
1342 ndlp->nlp_DID = keepDID;
1343 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1344 }
1345 return new_ndlp;
1346 }
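/*
 * Worked example of the DID swap above (DIDs are illustrative): a PLOGI
 * issued to DID 0x010200 completes and the response WWPN matches an older,
 * inactive node still carrying DID 0x650000. That older node becomes
 * new_ndlp: it inherits DID 0x010200 and the login state, while the ndlp
 * the PLOGI was issued on takes keepDID 0x650000 and is either dropped or
 * moved back to NPR, so that no two nodes on the vport list share a DID.
 */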
1347
1348 /**
1349 * lpfc_end_rscn - Check and handle more rscn for a vport
1350 * @vport: pointer to a host virtual N_Port data structure.
1351 *
1352 * This routine checks whether more Registration State Change
1353 * Notifications (RSCNs) came in while the discovery state machine was in
1354 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1355 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1356 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1357 * handling the RSCNs.
1358 **/
1359 void
1360 lpfc_end_rscn(struct lpfc_vport *vport)
1361 {
1362 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1363
1364 if (vport->fc_flag & FC_RSCN_MODE) {
1365 /*
1366 * Check to see if more RSCNs came in while we were
1367 * processing this one.
1368 */
1369 if (vport->fc_rscn_id_cnt ||
1370 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1371 lpfc_els_handle_rscn(vport);
1372 else {
1373 spin_lock_irq(shost->host_lock);
1374 vport->fc_flag &= ~FC_RSCN_MODE;
1375 spin_unlock_irq(shost->host_lock);
1376 }
1377 }
1378 }
1379
1380 /**
1381 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1382 * @phba: pointer to lpfc hba data structure.
1383 * @cmdiocb: pointer to lpfc command iocb data structure.
1384 * @rspiocb: pointer to lpfc response iocb data structure.
1385 *
1386 * This routine is the completion callback function for issuing the Port
1387 * Login (PLOGI) command. For PLOGI completion, there must be an active
1388 * ndlp on the vport node list that matches the remote node ID from the
 1389  * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
 1390  * ignored and the command IOCB released. The PLOGI response IOCB status is
 1391  * checked for error conditions. If there is an error status reported, PLOGI
1392 * retry shall be attempted by invoking the lpfc_els_retry() routine.
 1393  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
 1394  * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discovery State
 1395  * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1396 * there are additional N_Port nodes with the vport that need to perform
 1397  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1398 * PLOGIs.
1399 **/
1400 static void
1401 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1402 struct lpfc_iocbq *rspiocb)
1403 {
1404 struct lpfc_vport *vport = cmdiocb->vport;
1405 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1406 IOCB_t *irsp;
1407 struct lpfc_nodelist *ndlp;
1408 struct lpfc_dmabuf *prsp;
1409 int disc, rc, did, type;
1410
1411 /* we pass cmdiocb to state machine which needs rspiocb as well */
1412 cmdiocb->context_un.rsp_iocb = rspiocb;
1413
1414 irsp = &rspiocb->iocb;
1415 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1416 "PLOGI cmpl: status:x%x/x%x did:x%x",
1417 irsp->ulpStatus, irsp->un.ulpWord[4],
1418 irsp->un.elsreq64.remoteID);
1419
1420 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1421 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1422 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1423 "0136 PLOGI completes to NPort x%x "
1424 "with no ndlp. Data: x%x x%x x%x\n",
1425 irsp->un.elsreq64.remoteID,
1426 irsp->ulpStatus, irsp->un.ulpWord[4],
1427 irsp->ulpIoTag);
1428 goto out;
1429 }
1430
1431 /* Since ndlp can be freed in the disc state machine, note if this node
1432 * is being used during discovery.
1433 */
1434 spin_lock_irq(shost->host_lock);
1435 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1436 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1437 spin_unlock_irq(shost->host_lock);
1438 rc = 0;
1439
1440 /* PLOGI completes to NPort <nlp_DID> */
1441 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1442 "0102 PLOGI completes to NPort x%x "
1443 "Data: x%x x%x x%x x%x x%x\n",
1444 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1445 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1446 /* Check to see if link went down during discovery */
1447 if (lpfc_els_chk_latt(vport)) {
1448 spin_lock_irq(shost->host_lock);
1449 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1450 spin_unlock_irq(shost->host_lock);
1451 goto out;
1452 }
1453
1454 /* ndlp could be freed in DSM, save these values now */
1455 type = ndlp->nlp_type;
1456 did = ndlp->nlp_DID;
1457
1458 if (irsp->ulpStatus) {
1459 /* Check for retry */
1460 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1461 /* ELS command is being retried */
1462 if (disc) {
1463 spin_lock_irq(shost->host_lock);
1464 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1465 spin_unlock_irq(shost->host_lock);
1466 }
1467 goto out;
1468 }
1469 /* PLOGI failed */
1470 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1471 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1472 ndlp->nlp_DID, irsp->ulpStatus,
1473 irsp->un.ulpWord[4]);
1474 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1475 if (lpfc_error_lost_link(irsp))
1476 rc = NLP_STE_FREED_NODE;
1477 else
1478 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1479 NLP_EVT_CMPL_PLOGI);
1480 } else {
1481 /* Good status, call state machine */
1482 prsp = list_entry(((struct lpfc_dmabuf *)
1483 cmdiocb->context2)->list.next,
1484 struct lpfc_dmabuf, list);
1485 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1486 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1487 NLP_EVT_CMPL_PLOGI);
1488 }
1489
1490 if (disc && vport->num_disc_nodes) {
1491 /* Check to see if there are more PLOGIs to be sent */
1492 lpfc_more_plogi(vport);
1493
1494 if (vport->num_disc_nodes == 0) {
1495 spin_lock_irq(shost->host_lock);
1496 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1497 spin_unlock_irq(shost->host_lock);
1498
1499 lpfc_can_disctmo(vport);
1500 lpfc_end_rscn(vport);
1501 }
1502 }
1503
1504 out:
1505 lpfc_els_free_iocb(phba, cmdiocb);
1506 return;
1507 }
1508
1509 /**
1510 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1511 * @vport: pointer to a host virtual N_Port data structure.
1512 * @did: destination port identifier.
1513 * @retry: number of retries to the command IOCB.
1514 *
1515 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1516 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1517 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 1518  * This routine constructs the proper fields of the PLOGI IOCB and invokes
 1519  * the lpfc_sli_issue_iocb() routine to send out the PLOGI ELS command.
1520 *
1521 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1522 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1523 * will be stored into the context1 field of the IOCB for the completion
1524 * callback function to the PLOGI ELS command.
1525 *
1526 * Return code
1527 * 0 - Successfully issued a plogi for @vport
1528 * 1 - failed to issue a plogi for @vport
1529 **/
1530 int
1531 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1532 {
1533 struct lpfc_hba *phba = vport->phba;
1534 struct serv_parm *sp;
1535 IOCB_t *icmd;
1536 struct lpfc_nodelist *ndlp;
1537 struct lpfc_iocbq *elsiocb;
1538 struct lpfc_sli *psli;
1539 uint8_t *pcmd;
1540 uint16_t cmdsize;
1541 int ret;
1542
1543 psli = &phba->sli;
1544
1545 ndlp = lpfc_findnode_did(vport, did);
1546 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1547 ndlp = NULL;
1548
1549 /* If ndlp is not NULL, we will bump the reference count on it */
1550 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1551 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1552 ELS_CMD_PLOGI);
1553 if (!elsiocb)
1554 return 1;
1555
1556 icmd = &elsiocb->iocb;
1557 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1558
1559 /* For PLOGI request, remainder of payload is service parameters */
1560 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1561 pcmd += sizeof(uint32_t);
1562 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1563 sp = (struct serv_parm *) pcmd;
1564
1565 if (sp->cmn.fcphLow < FC_PH_4_3)
1566 sp->cmn.fcphLow = FC_PH_4_3;
1567
1568 if (sp->cmn.fcphHigh < FC_PH3)
1569 sp->cmn.fcphHigh = FC_PH3;
1570
1571 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1572 "Issue PLOGI: did:x%x",
1573 did, 0, 0);
1574
1575 phba->fc_stat.elsXmitPLOGI++;
1576 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1577 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1578
1579 if (ret == IOCB_ERROR) {
1580 lpfc_els_free_iocb(phba, elsiocb);
1581 return 1;
1582 }
1583 return 0;
1584 }
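/*
 * Caller sketch (illustration only): PLOGIs are issued by DID rather than
 * by ndlp pointer; the routine above looks up the ndlp itself and
 * lpfc_cmpl_els_plogi() drives the rest through the discovery state
 * machine. For example, a PLOGI with no prior retries could be started as:
 *
 *	if (lpfc_issue_els_plogi(vport, did, 0))
 *		// nothing was queued; the caller must handle the failure
 *		return 1;
 */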
1585
1586 /**
1587 * lpfc_cmpl_els_prli - Completion callback function for prli
1588 * @phba: pointer to lpfc hba data structure.
1589 * @cmdiocb: pointer to lpfc command iocb data structure.
1590 * @rspiocb: pointer to lpfc response iocb data structure.
1591 *
1592 * This routine is the completion callback function for a Process Login
1593 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
 1594  * status. If there is an error status reported, PRLI retry shall be attempted
 1595  * by invoking the lpfc_els_retry() routine. Otherwise, the event
 1596  * NLP_EVT_CMPL_PRLI is sent to the Discovery State Machine (DSM) for this
1597 * ndlp to mark the PRLI completion.
1598 **/
1599 static void
1600 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1601 struct lpfc_iocbq *rspiocb)
1602 {
1603 struct lpfc_vport *vport = cmdiocb->vport;
1604 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1605 IOCB_t *irsp;
1606 struct lpfc_sli *psli;
1607 struct lpfc_nodelist *ndlp;
1608
1609 psli = &phba->sli;
1610 /* we pass cmdiocb to state machine which needs rspiocb as well */
1611 cmdiocb->context_un.rsp_iocb = rspiocb;
1612
1613 irsp = &(rspiocb->iocb);
1614 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1615 spin_lock_irq(shost->host_lock);
1616 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1617 spin_unlock_irq(shost->host_lock);
1618
1619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1620 "PRLI cmpl: status:x%x/x%x did:x%x",
1621 irsp->ulpStatus, irsp->un.ulpWord[4],
1622 ndlp->nlp_DID);
1623 /* PRLI completes to NPort <nlp_DID> */
1624 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1625 "0103 PRLI completes to NPort x%x "
1626 "Data: x%x x%x x%x x%x\n",
1627 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1628 irsp->ulpTimeout, vport->num_disc_nodes);
1629
1630 vport->fc_prli_sent--;
1631 /* Check to see if link went down during discovery */
1632 if (lpfc_els_chk_latt(vport))
1633 goto out;
1634
1635 if (irsp->ulpStatus) {
1636 /* Check for retry */
1637 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1638 /* ELS command is being retried */
1639 goto out;
1640 }
1641 /* PRLI failed */
1642 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1643 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1644 ndlp->nlp_DID, irsp->ulpStatus,
1645 irsp->un.ulpWord[4]);
1646 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1647 if (lpfc_error_lost_link(irsp))
1648 goto out;
1649 else
1650 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1651 NLP_EVT_CMPL_PRLI);
1652 } else
1653 /* Good status, call state machine */
1654 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1655 NLP_EVT_CMPL_PRLI);
1656 out:
1657 lpfc_els_free_iocb(phba, cmdiocb);
1658 return;
1659 }
1660
1661 /**
1662 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
1663 * @vport: pointer to a host virtual N_Port data structure.
1664 * @ndlp: pointer to a node-list data structure.
1665 * @retry: number of retries to the command IOCB.
1666 *
1667 * This routine issues a Process Login (PRLI) ELS command for the
1668 * @vport. The PRLI service parameters are set up in the payload of the
1669 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1670 * is put to the IOCB completion callback func field before invoking the
1671 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1672 *
1673 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1674 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1675 * will be stored into the context1 field of the IOCB for the completion
1676 * callback function to the PRLI ELS command.
1677 *
1678 * Return code
1679 * 0 - successfully issued prli iocb command for @vport
1680 * 1 - failed to issue prli iocb command for @vport
1681 **/
1682 int
1683 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1684 uint8_t retry)
1685 {
1686 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1687 struct lpfc_hba *phba = vport->phba;
1688 PRLI *npr;
1689 IOCB_t *icmd;
1690 struct lpfc_iocbq *elsiocb;
1691 uint8_t *pcmd;
1692 uint16_t cmdsize;
1693
1694 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1695 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1696 ndlp->nlp_DID, ELS_CMD_PRLI);
1697 if (!elsiocb)
1698 return 1;
1699
1700 icmd = &elsiocb->iocb;
1701 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1702
1703 /* For PRLI request, remainder of payload is service parameters */
1704 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1705 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1706 pcmd += sizeof(uint32_t);
1707
1708 /* For PRLI, remainder of payload is PRLI parameter page */
1709 npr = (PRLI *) pcmd;
1710 /*
1711 * If our firmware version is 3.20 or later,
1712 * set the following bits for FC-TAPE support.
1713 */
1714 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1715 npr->ConfmComplAllowed = 1;
1716 npr->Retry = 1;
1717 npr->TaskRetryIdReq = 1;
1718 }
1719 npr->estabImagePair = 1;
1720 npr->readXferRdyDis = 1;
1721
1722 /* For FCP support */
1723 npr->prliType = PRLI_FCP_TYPE;
1724 npr->initiatorFunc = 1;
1725
1726 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1727 "Issue PRLI: did:x%x",
1728 ndlp->nlp_DID, 0, 0);
1729
1730 phba->fc_stat.elsXmitPRLI++;
1731 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1732 spin_lock_irq(shost->host_lock);
1733 ndlp->nlp_flag |= NLP_PRLI_SND;
1734 spin_unlock_irq(shost->host_lock);
1735 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1736 IOCB_ERROR) {
1737 spin_lock_irq(shost->host_lock);
1738 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1739 spin_unlock_irq(shost->host_lock);
1740 lpfc_els_free_iocb(phba, elsiocb);
1741 return 1;
1742 }
1743 vport->fc_prli_sent++;
1744 return 0;
1745 }
1746
1747 /**
1748 * lpfc_rscn_disc - Perform rscn discovery for a vport
1749 * @vport: pointer to a host virtual N_Port data structure.
1750 *
1751 * This routine performs Registration State Change Notification (RSCN)
1752 * discovery for a @vport. If the @vport's node port recovery count is not
1753 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
1754 * the nodes that need recovery. If no PLOGIs needed to be issued by the
1755 * lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1756 * invoked to check for and handle any additional RSCNs that arrived while
1757 * the current ones were being processed.
1758 **/
1759 static void
1760 lpfc_rscn_disc(struct lpfc_vport *vport)
1761 {
1762 lpfc_can_disctmo(vport);
1763
1764 /* RSCN discovery */
1765 /* go thru NPR nodes and issue ELS PLOGIs */
1766 if (vport->fc_npr_cnt)
1767 if (lpfc_els_disc_plogi(vport))
1768 return;
1769
1770 lpfc_end_rscn(vport);
1771 }
1772
1773 /**
1774 * lpfc_adisc_done - Complete the adisc phase of discovery
1775 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1776 *
1777 * This function is called when the final ADISC is completed during discovery.
1778 * This function handles clearing link attention or issuing reg_vpi depending
1779 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
1780 * discovery.
1781 * This function is called with no locks held.
1782 **/
1783 static void
1784 lpfc_adisc_done(struct lpfc_vport *vport)
1785 {
1786 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1787 struct lpfc_hba *phba = vport->phba;
1788
1789 /*
1790 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1791 * and continue discovery.
1792 */
1793 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1794 !(vport->fc_flag & FC_RSCN_MODE) &&
1795 (phba->sli_rev < LPFC_SLI_REV4)) {
1796 lpfc_issue_reg_vpi(phba, vport);
1797 return;
1798 }
1799 /*
1800 * For SLI2, we need to set port_state to READY
1801 * and continue discovery.
1802 */
1803 if (vport->port_state < LPFC_VPORT_READY) {
1804 /* If we get here, there is nothing to ADISC */
1805 if (vport->port_type == LPFC_PHYSICAL_PORT)
1806 lpfc_issue_clear_la(phba, vport);
1807 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1808 vport->num_disc_nodes = 0;
1809 /* go thru NPR list, issue ELS PLOGIs */
1810 if (vport->fc_npr_cnt)
1811 lpfc_els_disc_plogi(vport);
1812 if (!vport->num_disc_nodes) {
1813 spin_lock_irq(shost->host_lock);
1814 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1815 spin_unlock_irq(shost->host_lock);
1816 lpfc_can_disctmo(vport);
1817 lpfc_end_rscn(vport);
1818 }
1819 }
1820 vport->port_state = LPFC_VPORT_READY;
1821 } else
1822 lpfc_rscn_disc(vport);
1823 }
1824
1825 /**
1826 * lpfc_more_adisc - Issue more adisc as needed
1827 * @vport: pointer to a host virtual N_Port data structure.
1828 *
1829 * This routine determines whether there are more ndlps on a @vport's
1830 * node list that need to have Address Discover (ADISC) issued. If so, it will
1831 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
1832 * remaining nodes which need to have ADISC sent.
1833 **/
1834 void
1835 lpfc_more_adisc(struct lpfc_vport *vport)
1836 {
1837 int sentadisc;
1838
1839 if (vport->num_disc_nodes)
1840 vport->num_disc_nodes--;
1841 /* Continue discovery with <num_disc_nodes> ADISCs to go */
1842 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1843 "0210 Continue discovery with %d ADISCs to go "
1844 "Data: x%x x%x x%x\n",
1845 vport->num_disc_nodes, vport->fc_adisc_cnt,
1846 vport->fc_flag, vport->port_state);
1847 /* Check to see if there are more ADISCs to be sent */
1848 if (vport->fc_flag & FC_NLP_MORE) {
1849 lpfc_set_disctmo(vport);
1850 /* go thru NPR nodes and issue any remaining ELS ADISCs */
1851 sentadisc = lpfc_els_disc_adisc(vport);
1852 }
1853 if (!vport->num_disc_nodes)
1854 lpfc_adisc_done(vport);
1855 return;
1856 }
1857
1858 /**
1859 * lpfc_cmpl_els_adisc - Completion callback function for adisc
1860 * @phba: pointer to lpfc hba data structure.
1861 * @cmdiocb: pointer to lpfc command iocb data structure.
1862 * @rspiocb: pointer to lpfc response iocb data structure.
1863 *
1864 * This routine is the completion function for issuing the Address Discover
1865 * (ADISC) command. It first checks to see whether the link went down during
1866 * the discovery process. If so, the node will be marked for node port
1867 * recovery so that the link attention handler can reissue the discovery
1868 * IOCB, and the routine exits. Otherwise, the response status is checked.
1869 * If an error was reported in the response status, the ADISC command shall
1870 * be retried by invoking the lpfc_els_retry() routine. Otherwise, the
1871 * state machine is invoked to transition the node with respect to the
1872 * NLP_EVT_CMPL_ADISC event.
1873 **/
1874 static void
1875 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1876 struct lpfc_iocbq *rspiocb)
1877 {
1878 struct lpfc_vport *vport = cmdiocb->vport;
1879 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1880 IOCB_t *irsp;
1881 struct lpfc_nodelist *ndlp;
1882 int disc;
1883
1884 /* we pass cmdiocb to state machine which needs rspiocb as well */
1885 cmdiocb->context_un.rsp_iocb = rspiocb;
1886
1887 irsp = &(rspiocb->iocb);
1888 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1889
1890 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1891 "ADISC cmpl: status:x%x/x%x did:x%x",
1892 irsp->ulpStatus, irsp->un.ulpWord[4],
1893 ndlp->nlp_DID);
1894
1895 /* Since ndlp can be freed in the disc state machine, note if this node
1896 * is being used during discovery.
1897 */
1898 spin_lock_irq(shost->host_lock);
1899 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1900 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1901 spin_unlock_irq(shost->host_lock);
1902 /* ADISC completes to NPort <nlp_DID> */
1903 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1904 "0104 ADISC completes to NPort x%x "
1905 "Data: x%x x%x x%x x%x x%x\n",
1906 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1907 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1908 /* Check to see if link went down during discovery */
1909 if (lpfc_els_chk_latt(vport)) {
1910 spin_lock_irq(shost->host_lock);
1911 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1912 spin_unlock_irq(shost->host_lock);
1913 goto out;
1914 }
1915
1916 if (irsp->ulpStatus) {
1917 /* Check for retry */
1918 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1919 /* ELS command is being retried */
1920 if (disc) {
1921 spin_lock_irq(shost->host_lock);
1922 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1923 spin_unlock_irq(shost->host_lock);
1924 lpfc_set_disctmo(vport);
1925 }
1926 goto out;
1927 }
1928 /* ADISC failed */
1929 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1930 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
1931 ndlp->nlp_DID, irsp->ulpStatus,
1932 irsp->un.ulpWord[4]);
1933 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1934 if (!lpfc_error_lost_link(irsp))
1935 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1936 NLP_EVT_CMPL_ADISC);
1937 } else
1938 /* Good status, call state machine */
1939 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1940 NLP_EVT_CMPL_ADISC);
1941
1942 /* Check to see if there are more ADISCs to be sent */
1943 if (disc && vport->num_disc_nodes)
1944 lpfc_more_adisc(vport);
1945 out:
1946 lpfc_els_free_iocb(phba, cmdiocb);
1947 return;
1948 }
1949
1950 /**
1951 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
1952 * @vport: pointer to a virtual N_Port data structure.
1953 * @ndlp: pointer to a node-list data structure.
1954 * @retry: number of retries to the command IOCB.
1955 *
1956 * This routine issues an Address Discover (ADISC) for an @ndlp on a
1957 * @vport. It prepares the payload of the ADISC ELS command, updates the
1958 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
1959 * to issue the ADISC ELS command.
1960 *
1961 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1962 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1963 * will be stored into the context1 field of the IOCB for the completion
1964 * callback function to the ADISC ELS command.
1965 *
1966 * Return code
1967 * 0 - successfully issued adisc
1968 * 1 - failed to issue adisc
1969 **/
1970 int
1971 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1972 uint8_t retry)
1973 {
1974 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1975 struct lpfc_hba *phba = vport->phba;
1976 ADISC *ap;
1977 IOCB_t *icmd;
1978 struct lpfc_iocbq *elsiocb;
1979 uint8_t *pcmd;
1980 uint16_t cmdsize;
1981
1982 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1983 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1984 ndlp->nlp_DID, ELS_CMD_ADISC);
1985 if (!elsiocb)
1986 return 1;
1987
1988 icmd = &elsiocb->iocb;
1989 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1990
1991 /* For ADISC request, remainder of payload is service parameters */
1992 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1993 pcmd += sizeof(uint32_t);
1994
1995 /* Fill in ADISC payload */
1996 ap = (ADISC *) pcmd;
1997 ap->hardAL_PA = phba->fc_pref_ALPA;
1998 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1999 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2000 ap->DID = be32_to_cpu(vport->fc_myDID);
2001
2002 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2003 "Issue ADISC: did:x%x",
2004 ndlp->nlp_DID, 0, 0);
2005
2006 phba->fc_stat.elsXmitADISC++;
2007 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2008 spin_lock_irq(shost->host_lock);
2009 ndlp->nlp_flag |= NLP_ADISC_SND;
2010 spin_unlock_irq(shost->host_lock);
2011 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2012 IOCB_ERROR) {
2013 spin_lock_irq(shost->host_lock);
2014 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2015 spin_unlock_irq(shost->host_lock);
2016 lpfc_els_free_iocb(phba, elsiocb);
2017 return 1;
2018 }
2019 return 0;
2020 }
2021
2022 /**
2023 * lpfc_cmpl_els_logo - Completion callback function for logo
2024 * @phba: pointer to lpfc hba data structure.
2025 * @cmdiocb: pointer to lpfc command iocb data structure.
2026 * @rspiocb: pointer to lpfc response iocb data structure.
2027 *
2028 * This routine is the completion function for issuing the ELS Logout (LOGO)
2029 * command. If no error status was reported from the LOGO response, the
2030 * state machine of the associated ndlp shall be invoked for transition with
2031 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2032 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2033 **/
2034 static void
2035 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2036 struct lpfc_iocbq *rspiocb)
2037 {
2038 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2039 struct lpfc_vport *vport = ndlp->vport;
2040 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2041 IOCB_t *irsp;
2042 struct lpfc_sli *psli;
2043
2044 psli = &phba->sli;
2045 /* we pass cmdiocb to state machine which needs rspiocb as well */
2046 cmdiocb->context_un.rsp_iocb = rspiocb;
2047
2048 irsp = &(rspiocb->iocb);
2049 spin_lock_irq(shost->host_lock);
2050 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2051 spin_unlock_irq(shost->host_lock);
2052
2053 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2054 "LOGO cmpl: status:x%x/x%x did:x%x",
2055 irsp->ulpStatus, irsp->un.ulpWord[4],
2056 ndlp->nlp_DID);
2057 /* LOGO completes to NPort <nlp_DID> */
2058 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2059 "0105 LOGO completes to NPort x%x "
2060 "Data: x%x x%x x%x x%x\n",
2061 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2062 irsp->ulpTimeout, vport->num_disc_nodes);
2063 /* Check to see if link went down during discovery */
2064 if (lpfc_els_chk_latt(vport))
2065 goto out;
2066
2067 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2068 /* NLP_EVT_DEVICE_RM should unregister the RPI
2069 * which should abort all outstanding IOs.
2070 */
2071 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2072 NLP_EVT_DEVICE_RM);
2073 goto out;
2074 }
2075
2076 if (irsp->ulpStatus) {
2077 /* Check for retry */
2078 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
2079 /* ELS command is being retried */
2080 goto out;
2081 /* LOGO failed */
2082 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2083 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2084 ndlp->nlp_DID, irsp->ulpStatus,
2085 irsp->un.ulpWord[4]);
2086 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2087 if (lpfc_error_lost_link(irsp))
2088 goto out;
2089 else
2090 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2091 NLP_EVT_CMPL_LOGO);
2092 } else
2093 /* Good status, call state machine.
2094 * This will unregister the rpi if needed.
2095 */
2096 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2097 NLP_EVT_CMPL_LOGO);
2098 out:
2099 lpfc_els_free_iocb(phba, cmdiocb);
2100 return;
2101 }
2102
2103 /**
2104 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2105 * @vport: pointer to a virtual N_Port data structure.
2106 * @ndlp: pointer to a node-list data structure.
2107 * @retry: number of retries to the command IOCB.
2108 *
2109 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2110 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2111 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2112 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2113 *
2114 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2115 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2116 * will be stored into the context1 field of the IOCB for the completion
2117 * callback function to the LOGO ELS command.
2118 *
2119 * Return code
2120 * 0 - successfully issued logo
2121 * 1 - failed to issue logo
2122 **/
2123 int
2124 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2125 uint8_t retry)
2126 {
2127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2128 struct lpfc_hba *phba = vport->phba;
2129 IOCB_t *icmd;
2130 struct lpfc_iocbq *elsiocb;
2131 uint8_t *pcmd;
2132 uint16_t cmdsize;
2133 int rc;
2134
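	/*
	 * If a LOGO is already pending to this node (NLP_LOGO_SND is set),
	 * do not issue another one; report success and let the outstanding
	 * LOGO complete.
	 */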
2135 spin_lock_irq(shost->host_lock);
2136 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2137 spin_unlock_irq(shost->host_lock);
2138 return 0;
2139 }
2140 spin_unlock_irq(shost->host_lock);
2141
2142 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2143 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2144 ndlp->nlp_DID, ELS_CMD_LOGO);
2145 if (!elsiocb)
2146 return 1;
2147
2148 icmd = &elsiocb->iocb;
2149 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2150 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2151 pcmd += sizeof(uint32_t);
2152
2153 /* Fill in LOGO payload */
2154 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2155 pcmd += sizeof(uint32_t);
2156 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2157
2158 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2159 "Issue LOGO: did:x%x",
2160 ndlp->nlp_DID, 0, 0);
2161
2162 phba->fc_stat.elsXmitLOGO++;
2163 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2164 spin_lock_irq(shost->host_lock);
2165 ndlp->nlp_flag |= NLP_LOGO_SND;
2166 spin_unlock_irq(shost->host_lock);
2167 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2168
2169 if (rc == IOCB_ERROR) {
2170 spin_lock_irq(shost->host_lock);
2171 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2172 spin_unlock_irq(shost->host_lock);
2173 lpfc_els_free_iocb(phba, elsiocb);
2174 return 1;
2175 }
2176 return 0;
2177 }
2178
2179 /**
2180 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2181 * @phba: pointer to lpfc hba data structure.
2182 * @cmdiocb: pointer to lpfc command iocb data structure.
2183 * @rspiocb: pointer to lpfc response iocb data structure.
2184 *
2185 * This routine is a generic completion callback function for ELS commands.
2186 * Specifically, it is the callback function which does not need to perform
2187 * any command specific operations. It is currently used by the ELS command
2188 * issuing routines for the ELS State Change Request (SCR),
2189 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2190 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2191 * certain debug loggings, this callback function simply invokes the
2192 * lpfc_els_chk_latt() routine to check whether link went down during the
2193 * discovery process.
2194 **/
2195 static void
2196 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2197 struct lpfc_iocbq *rspiocb)
2198 {
2199 struct lpfc_vport *vport = cmdiocb->vport;
2200 IOCB_t *irsp;
2201
2202 irsp = &rspiocb->iocb;
2203
2204 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2205 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2206 irsp->ulpStatus, irsp->un.ulpWord[4],
2207 irsp->un.elsreq64.remoteID);
2208 /* ELS cmd tag <ulpIoTag> completes */
2209 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2210 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2211 irsp->ulpIoTag, irsp->ulpStatus,
2212 irsp->un.ulpWord[4], irsp->ulpTimeout);
2213 /* Check to see if link went down during discovery */
2214 lpfc_els_chk_latt(vport);
2215 lpfc_els_free_iocb(phba, cmdiocb);
2216 return;
2217 }
2218
2219 /**
2220 * lpfc_issue_els_scr - Issue a scr to a node on a vport
2221 * @vport: pointer to a host virtual N_Port data structure.
2222 * @nportid: N_Port identifier to the remote node.
2223 * @retry: number of retries to the command IOCB.
2224 *
2225 * This routine issues a State Change Request (SCR) to a fabric node
2226 * on a @vport. The remote node @nportid is passed into the function. It
2227 * first searches the @vport node list to find the matching ndlp. If no such
2228 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2229 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2230 * routine is invoked to send the SCR IOCB.
2231 *
2232 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2233 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2234 * will be stored into the context1 field of the IOCB for the completion
2235 * callback function to the SCR ELS command.
2236 *
2237 * Return code
2238 * 0 - Successfully issued scr command
2239 * 1 - Failed to issue scr command
2240 **/
2241 int
2242 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2243 {
2244 struct lpfc_hba *phba = vport->phba;
2245 IOCB_t *icmd;
2246 struct lpfc_iocbq *elsiocb;
2247 struct lpfc_sli *psli;
2248 uint8_t *pcmd;
2249 uint16_t cmdsize;
2250 struct lpfc_nodelist *ndlp;
2251
2252 psli = &phba->sli;
2253 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2254
2255 ndlp = lpfc_findnode_did(vport, nportid);
2256 if (!ndlp) {
2257 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2258 if (!ndlp)
2259 return 1;
2260 lpfc_nlp_init(vport, ndlp, nportid);
2261 lpfc_enqueue_node(vport, ndlp);
2262 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2263 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2264 if (!ndlp)
2265 return 1;
2266 }
2267
2268 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2269 ndlp->nlp_DID, ELS_CMD_SCR);
2270
2271 if (!elsiocb) {
2272 /* This will trigger the release of the node just
2273 * allocated
2274 */
2275 lpfc_nlp_put(ndlp);
2276 return 1;
2277 }
2278
2279 icmd = &elsiocb->iocb;
2280 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2281
2282 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2283 pcmd += sizeof(uint32_t);
2284
2285 /* For SCR, remainder of payload is SCR parameter page */
2286 memset(pcmd, 0, sizeof(SCR));
2287 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2288
2289 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2290 "Issue SCR: did:x%x",
2291 ndlp->nlp_DID, 0, 0);
2292
2293 phba->fc_stat.elsXmitSCR++;
2294 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2295 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2296 IOCB_ERROR) {
2297 /* The additional lpfc_nlp_put will cause the following
2298 * lpfc_els_free_iocb routine to trigger the release of
2299 * the node.
2300 */
2301 lpfc_nlp_put(ndlp);
2302 lpfc_els_free_iocb(phba, elsiocb);
2303 return 1;
2304 }
2305 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2306 * trigger the release of the node.
2307 */
2308 lpfc_nlp_put(ndlp);
2309 return 0;
2310 }
2311
2312 /**
2313 * lpfc_issue_els_farpr - Issue a farp to a node on a vport
2314 * @vport: pointer to a host virtual N_Port data structure.
2315 * @nportid: N_Port identifier to the remote node.
2316 * @retry: number of retries to the command IOCB.
2317 *
2318 * This routine issues a Fibre Channel Address Resolution Response
2319 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2320 * is passed into the function. It first searches the @vport node list to find
2321 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2322 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2323 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2324 *
2325 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2326 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2327 * will be stored into the context1 field of the IOCB for the completion
2328 * callback function to the FARPR ELS command.
2329 *
2330 * Return code
2331 * 0 - Successfully issued farpr command
2332 * 1 - Failed to issue farpr command
2333 **/
2334 static int
2335 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2336 {
2337 struct lpfc_hba *phba = vport->phba;
2338 IOCB_t *icmd;
2339 struct lpfc_iocbq *elsiocb;
2340 struct lpfc_sli *psli;
2341 FARP *fp;
2342 uint8_t *pcmd;
2343 uint32_t *lp;
2344 uint16_t cmdsize;
2345 struct lpfc_nodelist *ondlp;
2346 struct lpfc_nodelist *ndlp;
2347
2348 psli = &phba->sli;
2349 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2350
2351 ndlp = lpfc_findnode_did(vport, nportid);
2352 if (!ndlp) {
2353 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2354 if (!ndlp)
2355 return 1;
2356 lpfc_nlp_init(vport, ndlp, nportid);
2357 lpfc_enqueue_node(vport, ndlp);
2358 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2359 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2360 if (!ndlp)
2361 return 1;
2362 }
2363
2364 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2365 ndlp->nlp_DID, ELS_CMD_RNID);
2366 if (!elsiocb) {
2367 /* This will trigger the release of the node just
2368 * allocated
2369 */
2370 lpfc_nlp_put(ndlp);
2371 return 1;
2372 }
2373
2374 icmd = &elsiocb->iocb;
2375 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2376
2377 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2378 pcmd += sizeof(uint32_t);
2379
2380 /* Fill in FARPR payload */
2381 fp = (FARP *) (pcmd);
2382 memset(fp, 0, sizeof(FARP));
2383 lp = (uint32_t *) pcmd;
2384 *lp++ = be32_to_cpu(nportid);
2385 *lp++ = be32_to_cpu(vport->fc_myDID);
2386 fp->Rflags = 0;
2387 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2388
2389 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2390 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2391 ondlp = lpfc_findnode_did(vport, nportid);
2392 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2393 memcpy(&fp->OportName, &ondlp->nlp_portname,
2394 sizeof(struct lpfc_name));
2395 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2396 sizeof(struct lpfc_name));
2397 }
2398
2399 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2400 "Issue FARPR: did:x%x",
2401 ndlp->nlp_DID, 0, 0);
2402
2403 phba->fc_stat.elsXmitFARPR++;
2404 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2405 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2406 IOCB_ERROR) {
2407 /* The additional lpfc_nlp_put will cause the following
2408 * lpfc_els_free_iocb routine to trigger the release of
2409 * the node.
2410 */
2411 lpfc_nlp_put(ndlp);
2412 lpfc_els_free_iocb(phba, elsiocb);
2413 return 1;
2414 }
2415 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2416 * trigger the release of the node.
2417 */
2418 lpfc_nlp_put(ndlp);
2419 return 0;
2420 }
2421
2422 /**
2423 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
2424 * @vport: pointer to a host virtual N_Port data structure.
2425 * @nlp: pointer to a node-list data structure.
2426 *
2427 * This routine cancels the timer with a delayed IOCB-command retry for
2428 * a @vport's @ndlp. It stops the timer for the delayed function retry and
2429 * removes the ELS retry event if one is present. In addition, if the
2430 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2431 * commands are sent for the @vport's nodes that require issuing discovery
2432 * ADISC.
2433 **/
2434 void
2435 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2436 {
2437 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2438 struct lpfc_work_evt *evtp;
2439
2440 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2441 return;
2442 spin_lock_irq(shost->host_lock);
2443 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2444 spin_unlock_irq(shost->host_lock);
2445 del_timer_sync(&nlp->nlp_delayfunc);
2446 nlp->nlp_last_elscmd = 0;
2447 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2448 list_del_init(&nlp->els_retry_evt.evt_listp);
2449 /* Decrement nlp reference count held for the delayed retry */
2450 evtp = &nlp->els_retry_evt;
2451 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2452 }
2453 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2454 spin_lock_irq(shost->host_lock);
2455 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2456 spin_unlock_irq(shost->host_lock);
2457 if (vport->num_disc_nodes) {
2458 if (vport->port_state < LPFC_VPORT_READY) {
2459 /* Check if there are more ADISCs to be sent */
2460 lpfc_more_adisc(vport);
2461 } else {
2462 /* Check if there are more PLOGIs to be sent */
2463 lpfc_more_plogi(vport);
2464 if (vport->num_disc_nodes == 0) {
2465 spin_lock_irq(shost->host_lock);
2466 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2467 spin_unlock_irq(shost->host_lock);
2468 lpfc_can_disctmo(vport);
2469 lpfc_end_rscn(vport);
2470 }
2471 }
2472 }
2473 }
2474 return;
2475 }
2476
2477 /**
2478 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
2479 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2480 *
2481 * This routine is invoked by the ndlp delayed-function timer to check
2482 * whether there is any pending ELS retry event(s) with the node. If not, it
2483 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2484 * adds the delayed events to the HBA work list and invokes the
2485 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2486 * event. Note that lpfc_nlp_get() is called before posting the event to
2487 * the work list to hold reference count of ndlp so that it guarantees the
2488 * reference to ndlp will still be available when the worker thread gets
2489 * to the event associated with the ndlp.
2490 **/
2491 void
2492 lpfc_els_retry_delay(unsigned long ptr)
2493 {
2494 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2495 struct lpfc_vport *vport = ndlp->vport;
2496 struct lpfc_hba *phba = vport->phba;
2497 unsigned long flags;
2498 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
2499
2500 spin_lock_irqsave(&phba->hbalock, flags);
2501 if (!list_empty(&evtp->evt_listp)) {
2502 spin_unlock_irqrestore(&phba->hbalock, flags);
2503 return;
2504 }
2505
2506 /* We need to hold the node by incrementing the reference
2507 * count until the queued work is done
2508 */
2509 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
2510 if (evtp->evt_arg1) {
2511 evtp->evt = LPFC_EVT_ELS_RETRY;
2512 list_add_tail(&evtp->evt_listp, &phba->work_list);
2513 lpfc_worker_wake_up(phba);
2514 }
2515 spin_unlock_irqrestore(&phba->hbalock, flags);
2516 return;
2517 }
2518
2519 /**
2520 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
2521 * @ndlp: pointer to a node-list data structure.
2522 *
2523 * This routine is the worker-thread handler for processing the @ndlp delayed
2524 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2525 * the last ELS command from the associated ndlp and invokes the proper ELS
2526 * function according to the delayed ELS command to retry the command.
2527 **/
2528 void
2529 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2530 {
2531 struct lpfc_vport *vport = ndlp->vport;
2532 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2533 uint32_t cmd, did, retry;
2534
2535 spin_lock_irq(shost->host_lock);
2536 did = ndlp->nlp_DID;
2537 cmd = ndlp->nlp_last_elscmd;
2538 ndlp->nlp_last_elscmd = 0;
2539
2540 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2541 spin_unlock_irq(shost->host_lock);
2542 return;
2543 }
2544
2545 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2546 spin_unlock_irq(shost->host_lock);
2547 /*
2548 * If a discovery event re-added nlp_delayfunc after the timer
2549 * fired and before the timer was processed, cancel the
2550 * nlp_delayfunc.
2551 */
2552 del_timer_sync(&ndlp->nlp_delayfunc);
2553 retry = ndlp->nlp_retry;
2554 ndlp->nlp_retry = 0;
2555
2556 switch (cmd) {
2557 case ELS_CMD_FLOGI:
2558 lpfc_issue_els_flogi(vport, ndlp, retry);
2559 break;
2560 case ELS_CMD_PLOGI:
2561 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
2562 ndlp->nlp_prev_state = ndlp->nlp_state;
2563 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2564 }
2565 break;
2566 case ELS_CMD_ADISC:
2567 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
2568 ndlp->nlp_prev_state = ndlp->nlp_state;
2569 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2570 }
2571 break;
2572 case ELS_CMD_PRLI:
2573 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
2574 ndlp->nlp_prev_state = ndlp->nlp_state;
2575 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2576 }
2577 break;
2578 case ELS_CMD_LOGO:
2579 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2580 ndlp->nlp_prev_state = ndlp->nlp_state;
2581 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2582 }
2583 break;
2584 case ELS_CMD_FDISC:
2585 lpfc_issue_els_fdisc(vport, ndlp, retry);
2586 break;
2587 }
2588 return;
2589 }
2590
2591 /**
2592 * lpfc_els_retry - Make retry decision on an els command iocb
2593 * @phba: pointer to lpfc hba data structure.
2594 * @cmdiocb: pointer to lpfc command iocb data structure.
2595 * @rspiocb: pointer to lpfc response iocb data structure.
2596 *
2597 * This routine makes a retry decision on an ELS command IOCB, which has
2598 * failed. The following ELS IOCBs use this function for retrying the command
2599 * when a previously issued command responded with an error status: FLOGI, PLOGI,
2600 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2601 * returned error status, it makes the decision whether a retry shall be
2602 * issued for the command, and whether a retry shall be made immediately or
2603 * delayed. In the former case, the corresponding ELS command issuing-function
2604 * is called to retry the command. In the latter case, the ELS command shall
2605 * be posted to the ndlp delayed event and the delayed function timer set on the
2606 * ndlp for the delayed command issuing.
2607 *
2608 * Return code
2609 * 0 - No retry of els command is made
2610 * 1 - Immediate or delayed retry of els command is made
2611 **/
2612 static int
2613 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2614 struct lpfc_iocbq *rspiocb)
2615 {
2616 struct lpfc_vport *vport = cmdiocb->vport;
2617 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2618 IOCB_t *irsp = &rspiocb->iocb;
2619 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2620 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2621 uint32_t *elscmd;
2622 struct ls_rjt stat;
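	/*
	 * Retry decision state: retry selects whether the command is resent
	 * at all, maxretry caps the number of attempts, delay is the wait in
	 * milliseconds before a delayed retry, and logerr escalates the
	 * "No retry" message from KERN_INFO to KERN_ERR.
	 */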
2623 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
2624 int logerr = 0;
2625 uint32_t cmd = 0;
2626 uint32_t did;
2627
2628
2629 /* Note: context2 may be 0 for internal driver abort
2630 * of a delayed ELS command.
2631 */
2632
2633 if (pcmd && pcmd->virt) {
2634 elscmd = (uint32_t *) (pcmd->virt);
2635 cmd = *elscmd++;
2636 }
2637
2638 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2639 did = ndlp->nlp_DID;
2640 else {
2641 /* We should only hit this case for retrying PLOGI */
2642 did = irsp->un.elsreq64.remoteID;
2643 ndlp = lpfc_findnode_did(vport, did);
2644 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2645 && (cmd != ELS_CMD_PLOGI))
2646 return 1;
2647 }
2648
2649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2650 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2651 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2652
2653 switch (irsp->ulpStatus) {
2654 case IOSTAT_FCP_RSP_ERROR:
2655 case IOSTAT_REMOTE_STOP:
2656 break;
2657
2658 case IOSTAT_LOCAL_REJECT:
2659 switch ((irsp->un.ulpWord[4] & 0xff)) {
2660 case IOERR_LOOP_OPEN_FAILURE:
2661 if (cmd == ELS_CMD_FLOGI) {
2662 if (PCI_DEVICE_ID_HORNET ==
2663 phba->pcidev->device) {
2664 phba->fc_topology = TOPOLOGY_LOOP;
2665 phba->pport->fc_myDID = 0;
2666 phba->alpa_map[0] = 0;
2667 phba->alpa_map[1] = 0;
2668 }
2669 }
2670 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2671 delay = 1000;
2672 retry = 1;
2673 break;
2674
2675 case IOERR_ILLEGAL_COMMAND:
2676 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2677 "0124 Retry illegal cmd x%x "
2678 "retry:x%x delay:x%x\n",
2679 cmd, cmdiocb->retry, delay);
2680 retry = 1;
2681 /* All command's retry policy */
2682 maxretry = 8;
2683 if (cmdiocb->retry > 2)
2684 delay = 1000;
2685 break;
2686
2687 case IOERR_NO_RESOURCES:
2688 logerr = 1; /* HBA out of resources */
2689 retry = 1;
2690 if (cmdiocb->retry > 100)
2691 delay = 100;
2692 maxretry = 250;
2693 break;
2694
2695 case IOERR_ILLEGAL_FRAME:
2696 delay = 100;
2697 retry = 1;
2698 break;
2699
2700 case IOERR_SEQUENCE_TIMEOUT:
2701 case IOERR_INVALID_RPI:
2702 retry = 1;
2703 break;
2704 }
2705 break;
2706
2707 case IOSTAT_NPORT_RJT:
2708 case IOSTAT_FABRIC_RJT:
2709 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2710 retry = 1;
2711 break;
2712 }
2713 break;
2714
2715 case IOSTAT_NPORT_BSY:
2716 case IOSTAT_FABRIC_BSY:
2717 logerr = 1; /* Fabric / Remote NPort out of resources */
2718 retry = 1;
2719 break;
2720
2721 case IOSTAT_LS_RJT:
2722 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2723 /* Added for Vendor specific support
2724 * Just keep retrying for these Rsn / Exp codes
2725 */
2726 switch (stat.un.b.lsRjtRsnCode) {
2727 case LSRJT_UNABLE_TPC:
2728 if (stat.un.b.lsRjtRsnCodeExp ==
2729 LSEXP_CMD_IN_PROGRESS) {
2730 if (cmd == ELS_CMD_PLOGI) {
2731 delay = 1000;
2732 maxretry = 48;
2733 }
2734 retry = 1;
2735 break;
2736 }
2737 if (cmd == ELS_CMD_PLOGI) {
2738 delay = 1000;
2739 maxretry = lpfc_max_els_tries + 1;
2740 retry = 1;
2741 break;
2742 }
2743 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2744 (cmd == ELS_CMD_FDISC) &&
2745 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
2746 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2747 "0125 FDISC Failed (x%x). "
2748 "Fabric out of resources\n",
2749 stat.un.lsRjtError);
2750 lpfc_vport_set_state(vport,
2751 FC_VPORT_NO_FABRIC_RSCS);
2752 }
2753 break;
2754
2755 case LSRJT_LOGICAL_BSY:
2756 if ((cmd == ELS_CMD_PLOGI) ||
2757 (cmd == ELS_CMD_PRLI)) {
2758 delay = 1000;
2759 maxretry = 48;
2760 } else if (cmd == ELS_CMD_FDISC) {
2761 /* FDISC retry policy */
2762 maxretry = 48;
2763 if (cmdiocb->retry >= 32)
2764 delay = 1000;
2765 }
2766 retry = 1;
2767 break;
2768
2769 case LSRJT_LOGICAL_ERR:
2770 /* There are some cases where switches return this
2771 * error when they are not ready and should be returning
2772 * Logical Busy. We should delay every time.
2773 */
2774 if (cmd == ELS_CMD_FDISC &&
2775 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2776 maxretry = 3;
2777 delay = 1000;
2778 retry = 1;
2779 break;
2780 }
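			/* Otherwise fall through to the LSRJT_PROTOCOL_ERR handling */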
2781 case LSRJT_PROTOCOL_ERR:
2782 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2783 (cmd == ELS_CMD_FDISC) &&
2784 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2785 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2786 ) {
2787 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2788 "0122 FDISC Failed (x%x). "
2789 "Fabric Detected Bad WWN\n",
2790 stat.un.lsRjtError);
2791 lpfc_vport_set_state(vport,
2792 FC_VPORT_FABRIC_REJ_WWN);
2793 }
2794 break;
2795 }
2796 break;
2797
2798 case IOSTAT_INTERMED_RSP:
2799 case IOSTAT_BA_RJT:
2800 break;
2801
2802 default:
2803 break;
2804 }
2805
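	/* Commands addressed to the FDMI DID are always eligible for retry,
	 * regardless of the failure reason decoded above.
	 */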
2806 if (did == FDMI_DID)
2807 retry = 1;
2808
2809 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2810 (phba->fc_topology != TOPOLOGY_LOOP) &&
2811 !lpfc_error_lost_link(irsp)) {
2812 /* FLOGI retry policy */
2813 retry = 1;
2814 /* retry forever */
2815 maxretry = 0;
2816 if (cmdiocb->retry >= 100)
2817 delay = 5000;
2818 else if (cmdiocb->retry >= 32)
2819 delay = 1000;
2820 }
2821
2822 cmdiocb->retry++;
2823 if (maxretry && (cmdiocb->retry >= maxretry)) {
2824 phba->fc_stat.elsRetryExceeded++;
2825 retry = 0;
2826 }
2827
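	/* Never retry an ELS command while the vport is being unloaded */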
2828 if ((vport->load_flag & FC_UNLOADING) != 0)
2829 retry = 0;
2830
2831 if (retry) {
2832
2833 /* Retry ELS command <elsCmd> to remote NPORT <did> */
2834 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2835 "0107 Retry ELS command x%x to remote "
2836 "NPORT x%x Data: x%x x%x\n",
2837 cmd, did, cmdiocb->retry, delay);
2838
2839 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2840 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2841 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2842 /* Don't reset timer for no resources */
2843
2844 /* If discovery / RSCN timer is running, reset it */
2845 if (timer_pending(&vport->fc_disctmo) ||
2846 (vport->fc_flag & FC_RSCN_MODE))
2847 lpfc_set_disctmo(vport);
2848 }
2849
2850 phba->fc_stat.elsXmitRetry++;
2851 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
2852 phba->fc_stat.elsDelayRetry++;
2853 ndlp->nlp_retry = cmdiocb->retry;
2854
2855 /* delay is specified in milliseconds */
2856 mod_timer(&ndlp->nlp_delayfunc,
2857 jiffies + msecs_to_jiffies(delay));
2858 spin_lock_irq(shost->host_lock);
2859 ndlp->nlp_flag |= NLP_DELAY_TMO;
2860 spin_unlock_irq(shost->host_lock);
2861
2862 ndlp->nlp_prev_state = ndlp->nlp_state;
2863 if (cmd == ELS_CMD_PRLI)
2864 lpfc_nlp_set_state(vport, ndlp,
2865 NLP_STE_REG_LOGIN_ISSUE);
2866 else
2867 lpfc_nlp_set_state(vport, ndlp,
2868 NLP_STE_NPR_NODE);
2869 ndlp->nlp_last_elscmd = cmd;
2870
2871 return 1;
2872 }
2873 switch (cmd) {
2874 case ELS_CMD_FLOGI:
2875 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2876 return 1;
2877 case ELS_CMD_FDISC:
2878 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2879 return 1;
2880 case ELS_CMD_PLOGI:
2881 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2882 ndlp->nlp_prev_state = ndlp->nlp_state;
2883 lpfc_nlp_set_state(vport, ndlp,
2884 NLP_STE_PLOGI_ISSUE);
2885 }
2886 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2887 return 1;
2888 case ELS_CMD_ADISC:
2889 ndlp->nlp_prev_state = ndlp->nlp_state;
2890 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2891 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2892 return 1;
2893 case ELS_CMD_PRLI:
2894 ndlp->nlp_prev_state = ndlp->nlp_state;
2895 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2896 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
2897 return 1;
2898 case ELS_CMD_LOGO:
2899 ndlp->nlp_prev_state = ndlp->nlp_state;
2900 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2901 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
2902 return 1;
2903 }
2904 }
2905 /* No retry ELS command <elsCmd> to remote NPORT <did> */
2906 if (logerr) {
2907 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2908 "0137 No retry ELS command x%x to remote "
2909 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
2910 cmd, did, irsp->ulpStatus,
2911 irsp->un.ulpWord[4]);
2912 }
2913 else {
2914 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2915 "0108 No retry ELS command x%x to remote "
2916 "NPORT x%x Retried:%d Error:x%x/%x\n",
2917 cmd, did, cmdiocb->retry, irsp->ulpStatus,
2918 irsp->un.ulpWord[4]);
2919 }
2920 return 0;
2921 }
2922
2923 /**
2924 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
2925 * @phba: pointer to lpfc hba data structure.
2926 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
2927 *
2928 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
2929 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
2930 * checks to see whether there is a lpfc DMA buffer associated with the
2931 * response of the command IOCB. If so, it will be released before releasing
2932 * the lpfc DMA buffer associated with the IOCB itself.
2933 *
2934 * Return code
2935 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2936 **/
2937 static int
2938 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
2939 {
2940 struct lpfc_dmabuf *buf_ptr;
2941
2942 /* Free the response before processing the command. */
2943 if (!list_empty(&buf_ptr1->list)) {
2944 list_remove_head(&buf_ptr1->list, buf_ptr,
2945 struct lpfc_dmabuf,
2946 list);
2947 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2948 kfree(buf_ptr);
2949 }
2950 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2951 kfree(buf_ptr1);
2952 return 0;
2953 }
2954
2955 /**
2956 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
2957 * @phba: pointer to lpfc hba data structure.
2958 * @buf_ptr: pointer to the lpfc dma buffer data structure.
2959 *
2960 * This routine releases the lpfc Direct Memory Access (DMA) buffer
2961 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
2962 * pool.
2963 *
2964 * Return code
2965 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
2966 **/
2967 static int
2968 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
2969 {
2970 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2971 kfree(buf_ptr);
2972 return 0;
2973 }
2974
2975 /**
2976 * lpfc_els_free_iocb - Free a command iocb and its associated resources
2977 * @phba: pointer to lpfc hba data structure.
2978 * @elsiocb: pointer to lpfc els command iocb data structure.
2979 *
2980 * This routine frees a command IOCB and its associated resources. The
2981 * command IOCB data structure contains the reference to various associated
2982 * resources, these fields must be set to NULL if the associated reference
2983 * not present:
2984 * context1 - reference to ndlp
2985 * context2 - reference to cmd
2986 * context2->next - reference to rsp
2987 * context3 - reference to bpl
2988 *
2989 * It first properly decrements the reference count held on ndlp for the
2990 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
2991 * set, it invokes the lpfc_els_free_data() routine to release the Direct
2992 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
2993 * adds the DMA buffer to the @phba data structure for the delayed release.
2994 * If reference to the Buffer Pointer List (BPL) is present, the
2995 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
2996 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
2997 * invoked to release the IOCB data structure back to @phba IOCBQ list.
2998 *
2999 * Return code
3000 * 0 - Success (currently, always return 0)
3001 **/
3002 int
3003 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3004 {
3005 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3006 struct lpfc_nodelist *ndlp;
3007
3008 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3009 if (ndlp) {
3010 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3011 lpfc_nlp_put(ndlp);
3012
3013 /* If the ndlp is not being used by another discovery
3014 * thread, free it.
3015 */
3016 if (!lpfc_nlp_not_used(ndlp)) {
3017 /* If ndlp is being used by another discovery
3018 * thread, just clear NLP_DEFER_RM
3019 */
3020 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3021 }
3022 }
3023 else
3024 lpfc_nlp_put(ndlp);
3025 elsiocb->context1 = NULL;
3026 }
3027 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3028 if (elsiocb->context2) {
3029 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3030 /* Firmware could still be in progress of DMAing
3031 * payload, so don't free data buffer till after
3032 * a hbeat.
3033 */
3034 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3035 buf_ptr = elsiocb->context2;
3036 elsiocb->context2 = NULL;
3037 if (buf_ptr) {
3038 buf_ptr1 = NULL;
3039 spin_lock_irq(&phba->hbalock);
3040 if (!list_empty(&buf_ptr->list)) {
3041 list_remove_head(&buf_ptr->list,
3042 buf_ptr1, struct lpfc_dmabuf,
3043 list);
3044 INIT_LIST_HEAD(&buf_ptr1->list);
3045 list_add_tail(&buf_ptr1->list,
3046 &phba->elsbuf);
3047 phba->elsbuf_cnt++;
3048 }
3049 INIT_LIST_HEAD(&buf_ptr->list);
3050 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3051 phba->elsbuf_cnt++;
3052 spin_unlock_irq(&phba->hbalock);
3053 }
3054 } else {
3055 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3056 lpfc_els_free_data(phba, buf_ptr1);
3057 }
3058 }
3059
3060 if (elsiocb->context3) {
3061 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3062 lpfc_els_free_bpl(phba, buf_ptr);
3063 }
3064 lpfc_sli_release_iocbq(phba, elsiocb);
3065 return 0;
3066 }
3067
3068 /**
3069 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3070 * @phba: pointer to lpfc hba data structure.
3071 * @cmdiocb: pointer to lpfc command iocb data structure.
3072 * @rspiocb: pointer to lpfc response iocb data structure.
3073 *
3074 * This routine is the completion callback function to the Logout (LOGO)
3075 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3076 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3077 * release the ndlp if it has the last reference remaining (reference count
3078 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
3079 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3080 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3081 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3082 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3083 * IOCB data structure.
3084 **/
3085 static void
3086 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3087 struct lpfc_iocbq *rspiocb)
3088 {
3089 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3090 struct lpfc_vport *vport = cmdiocb->vport;
3091 IOCB_t *irsp;
3092
3093 irsp = &rspiocb->iocb;
3094 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3095 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3096 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3097 /* ACC to LOGO completes to NPort <nlp_DID> */
3098 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3099 "0109 ACC to LOGO completes to NPort x%x "
3100 "Data: x%x x%x x%x\n",
3101 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3102 ndlp->nlp_rpi);
3103
3104 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3105 /* NPort Recovery mode or node is just allocated */
3106 if (!lpfc_nlp_not_used(ndlp)) {
3107 /* If the ndlp is being used by another discovery
3108 * thread, just unregister the RPI.
3109 */
3110 lpfc_unreg_rpi(vport, ndlp);
3111 } else {
3112 /* Indicate the node has already been released; it should
3113 * not be referenced from within lpfc_els_free_iocb.
3114 */
3115 cmdiocb->context1 = NULL;
3116 }
3117 }
3118 lpfc_els_free_iocb(phba, cmdiocb);
3119 return;
3120 }
3121
3122 /**
3123 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3124 * @phba: pointer to lpfc hba data structure.
3125 * @pmb: pointer to the driver internal queue element for mailbox command.
3126 *
3127 * This routine is the completion callback function for unregister default
3128 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3129 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3130 * decrements the ndlp reference count held for this completion callback
3131 * function. After that, it invokes the lpfc_nlp_not_used() to check
3132 * whether there is only one reference left on the ndlp. If so, it will
3133 * perform one more decrement and trigger the release of the ndlp.
3134 **/
3135 void
3136 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3137 {
3138 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3139 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3140
3141 /*
3142 * This routine is used to register and unregister in previous SLI
3143 * modes.
3144 */
3145 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3146 (phba->sli_rev == LPFC_SLI_REV4))
3147 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3148
3149 pmb->context1 = NULL;
3150 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3151 kfree(mp);
3152 mempool_free(pmb, phba->mbox_mem_pool);
3153 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3154 lpfc_nlp_put(ndlp);
3155 /* This is the end of the default RPI cleanup logic for this
3156 * ndlp. If no other discovery threads are using this ndlp,
3157 * we should free all resources associated with it.
3158 */
3159 lpfc_nlp_not_used(ndlp);
3160 }
3161
3162 return;
3163 }
3164
3165 /**
3166 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3167 * @phba: pointer to lpfc hba data structure.
3168 * @cmdiocb: pointer to lpfc command iocb data structure.
3169 * @rspiocb: pointer to lpfc response iocb data structure.
3170 *
3171 * This routine is the completion callback function for ELS Response IOCB
3172 * command. In the normal case, this callback function just properly sets the
3173 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
3174 * field in the command IOCB is not NULL, the referred mailbox command will
3175 * be sent out, and then it invokes the lpfc_els_free_iocb() routine to release
3176 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3177 * link down event occurred during the discovery, the lpfc_nlp_not_used()
3178 * routine shall be invoked to try to release the ndlp if no other threads
3179 * are currently referring to it.
3180 **/
3181 static void
3182 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3183 struct lpfc_iocbq *rspiocb)
3184 {
3185 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3186 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3187 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3188 IOCB_t *irsp;
3189 uint8_t *pcmd;
3190 LPFC_MBOXQ_t *mbox = NULL;
3191 struct lpfc_dmabuf *mp = NULL;
3192 uint32_t ls_rjt = 0;
3193
3194 irsp = &rspiocb->iocb;
3195
3196 if (cmdiocb->context_un.mbox)
3197 mbox = cmdiocb->context_un.mbox;
3198
3199 /* First determine if this is a LS_RJT cmpl. Note, this callback
3200 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3201 */
3202 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3203 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3204 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3205 /* A LS_RJT associated with Default RPI cleanup has its own
3206 * separate code path.
3207 */
3208 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3209 ls_rjt = 1;
3210 }
3211
3212 /* Check to see if link went down during discovery */
3213 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3214 if (mbox) {
3215 mp = (struct lpfc_dmabuf *) mbox->context1;
3216 if (mp) {
3217 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3218 kfree(mp);
3219 }
3220 mempool_free(mbox, phba->mbox_mem_pool);
3221 }
3222 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3223 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3224 if (lpfc_nlp_not_used(ndlp)) {
3225 ndlp = NULL;
3226 /* Indicate the node has already been released,
3227 * and should not be referenced from within
3228 * the routine lpfc_els_free_iocb.
3229 */
3230 cmdiocb->context1 = NULL;
3231 }
3232 goto out;
3233 }
3234
3235 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3236 "ELS rsp cmpl: status:x%x/x%x did:x%x",
3237 irsp->ulpStatus, irsp->un.ulpWord[4],
3238 cmdiocb->iocb.un.elsreq64.remoteID);
3239 /* ELS response tag <ulpIoTag> completes */
3240 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3241 "0110 ELS response tag x%x completes "
3242 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3243 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3244 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3245 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3246 ndlp->nlp_rpi);
3247 if (mbox) {
3248 if ((rspiocb->iocb.ulpStatus == 0)
3249 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3250 lpfc_unreg_rpi(vport, ndlp);
3251 /* Increment reference count to ndlp to hold the
3252 * reference to ndlp for the callback function.
3253 */
3254 mbox->context2 = lpfc_nlp_get(ndlp);
3255 mbox->vport = vport;
3256 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3257 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3258 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3259 }
3260 else {
3261 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3262 ndlp->nlp_prev_state = ndlp->nlp_state;
3263 lpfc_nlp_set_state(vport, ndlp,
3264 NLP_STE_REG_LOGIN_ISSUE);
3265 }
3266 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3267 != MBX_NOT_FINISHED)
3268 goto out;
3269 else
3270 /* Decrement the ndlp reference count we
3271 * set for this failed mailbox command.
3272 */
3273 lpfc_nlp_put(ndlp);
3274
3275 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3276 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3277 "0138 ELS rsp: Cannot issue reg_login for x%x "
3278 "Data: x%x x%x x%x\n",
3279 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3280 ndlp->nlp_rpi);
3281
3282 if (lpfc_nlp_not_used(ndlp)) {
3283 ndlp = NULL;
3284 /* Indicate node has already been released,
3285 * and it should not be referenced from within
3286 * the routine lpfc_els_free_iocb.
3287 */
3288 cmdiocb->context1 = NULL;
3289 }
3290 } else {
3291 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3292 if (!lpfc_error_lost_link(irsp) &&
3293 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3294 if (lpfc_nlp_not_used(ndlp)) {
3295 ndlp = NULL;
3296 /* Indicate node has already been
3297 * released, and it should not be
3298 * referenced from within the routine
3299 * lpfc_els_free_iocb.
3300 */
3301 cmdiocb->context1 = NULL;
3302 }
3303 }
3304 }
3305 mp = (struct lpfc_dmabuf *) mbox->context1;
3306 if (mp) {
3307 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3308 kfree(mp);
3309 }
3310 mempool_free(mbox, phba->mbox_mem_pool);
3311 }
3312 out:
3313 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3314 spin_lock_irq(shost->host_lock);
3315 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3316 spin_unlock_irq(shost->host_lock);
3317
3318 /* If the node is not being used by another discovery thread,
3319 * and we are sending a reject, we are done with it.
3320 * Release driver reference count here and free associated
3321 * resources.
3322 */
3323 if (ls_rjt)
3324 if (lpfc_nlp_not_used(ndlp))
3325 /* Indicate node has already been released,
3326 * and it should not be referenced from within
3327 * the routine lpfc_els_free_iocb.
3328 */
3329 cmdiocb->context1 = NULL;
3330 }
3331
3332 lpfc_els_free_iocb(phba, cmdiocb);
3333 return;
3334 }
3335
3336 /**
3337 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
3338 * @vport: pointer to a host virtual N_Port data structure.
3339 * @flag: the els command code to be accepted.
3340 * @oldiocb: pointer to the original lpfc command iocb data structure.
3341 * @ndlp: pointer to a node-list data structure.
3342 * @mbox: pointer to the driver internal queue element for mailbox command.
3343 *
3344 * This routine prepares and issues an Accept (ACC) response IOCB
3345 * command. It uses the @flag to properly set up the IOCB field for the
3346 * specific ACC response command to be issued and invokes the
3347 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3348 * @mbox pointer is passed in, it will be put into the context_un.mbox
3349 * field of the IOCB for the completion callback function to issue the
3350 * mailbox command to the HBA later when callback is invoked.
3351 *
3352 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3353 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3354 * will be stored into the context1 field of the IOCB for the completion
3355 * callback function to the corresponding response ELS IOCB command.
3356 *
3357 * Return code
3358 * 0 - Successfully issued acc response
3359 * 1 - Failed to issue acc response
3360 **/
3361 int
3362 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3363 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3364 LPFC_MBOXQ_t *mbox)
3365 {
3366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3367 struct lpfc_hba *phba = vport->phba;
3368 IOCB_t *icmd;
3369 IOCB_t *oldcmd;
3370 struct lpfc_iocbq *elsiocb;
3371 struct lpfc_sli *psli;
3372 uint8_t *pcmd;
3373 uint16_t cmdsize;
3374 int rc;
3375 ELS_PKT *els_pkt_ptr;
3376
3377 psli = &phba->sli;
3378 oldcmd = &oldiocb->iocb;
3379
3380 switch (flag) {
3381 case ELS_CMD_ACC:
3382 cmdsize = sizeof(uint32_t);
3383 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3384 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3385 if (!elsiocb) {
3386 spin_lock_irq(shost->host_lock);
3387 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3388 spin_unlock_irq(shost->host_lock);
3389 return 1;
3390 }
3391
3392 icmd = &elsiocb->iocb;
3393 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3394 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3395 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3396 pcmd += sizeof(uint32_t);
3397
3398 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3399 "Issue ACC: did:x%x flg:x%x",
3400 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3401 break;
3402 case ELS_CMD_PLOGI:
3403 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3404 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3405 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3406 if (!elsiocb)
3407 return 1;
3408
3409 icmd = &elsiocb->iocb;
3410 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3411 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3412
3413 if (mbox)
3414 elsiocb->context_un.mbox = mbox;
3415
3416 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3417 pcmd += sizeof(uint32_t);
3418 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3419
3420 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3421 "Issue ACC PLOGI: did:x%x flg:x%x",
3422 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3423 break;
3424 case ELS_CMD_PRLO:
3425 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3426 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3427 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3428 if (!elsiocb)
3429 return 1;
3430
3431 icmd = &elsiocb->iocb;
3432 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3433 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3434
3435 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
3436 sizeof(uint32_t) + sizeof(PRLO));
3437 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3438 els_pkt_ptr = (ELS_PKT *) pcmd;
3439 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
3440
3441 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3442 "Issue ACC PRLO: did:x%x flg:x%x",
3443 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3444 break;
3445 default:
3446 return 1;
3447 }
3448 /* Xmit ELS ACC response tag <ulpIoTag> */
3449 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3450 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3451 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3452 elsiocb->iotag, elsiocb->iocb.ulpContext,
3453 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3454 ndlp->nlp_rpi);
3455 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3456 spin_lock_irq(shost->host_lock);
3457 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3458 spin_unlock_irq(shost->host_lock);
3459 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3460 } else {
3461 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3462 }
3463
3464 phba->fc_stat.elsXmitACC++;
3465 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3466 if (rc == IOCB_ERROR) {
3467 lpfc_els_free_iocb(phba, elsiocb);
3468 return 1;
3469 }
3470 return 0;
3471 }
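/*
 * Example usage (illustrative only): the unsolicited ELS handlers below
 * accept a received request by passing the original cmdiocb back to this
 * routine. A plain ACC with no mailbox attached looks like:
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 *
 * while an accept that must also trigger a REG_LOGIN from the completion
 * path may pass a prepared mailbox as the last argument so that
 * lpfc_cmpl_els_rsp() can issue it after the ACC completes.
 */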
3472
3473 /**
3474 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
3475 * @vport: pointer to a virtual N_Port data structure.
3476 * @rejectError: the LS_RJT reason/explanation word to be sent in the reject payload.
3477 * @oldiocb: pointer to the original lpfc command iocb data structure.
3478 * @ndlp: pointer to a node-list data structure.
3479 * @mbox: pointer to the driver internal queue element for mailbox command.
3480 *
3481 * This routine prepares and issues a Reject (RJT) response IOCB
3482 * command. If a @mbox pointer is passed in, it will be put into the
3483 * context_un.mbox field of the IOCB for the completion callback function
3484 * to issue to the HBA later.
3485 *
3486 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3487 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3488 * will be stored into the context1 field of the IOCB for the completion
3489 * callback function to the reject response ELS IOCB command.
3490 *
3491 * Return code
3492 * 0 - Successfully issued reject response
3493 * 1 - Failed to issue reject response
3494 **/
3495 int
3496 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3497 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3498 LPFC_MBOXQ_t *mbox)
3499 {
3500 struct lpfc_hba *phba = vport->phba;
3501 IOCB_t *icmd;
3502 IOCB_t *oldcmd;
3503 struct lpfc_iocbq *elsiocb;
3504 struct lpfc_sli *psli;
3505 uint8_t *pcmd;
3506 uint16_t cmdsize;
3507 int rc;
3508
3509 psli = &phba->sli;
3510 cmdsize = 2 * sizeof(uint32_t);
3511 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3512 ndlp->nlp_DID, ELS_CMD_LS_RJT);
3513 if (!elsiocb)
3514 return 1;
3515
3516 icmd = &elsiocb->iocb;
3517 oldcmd = &oldiocb->iocb;
3518 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3519 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3520
3521 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3522 pcmd += sizeof(uint32_t);
3523 *((uint32_t *) (pcmd)) = rejectError;
3524
3525 if (mbox)
3526 elsiocb->context_un.mbox = mbox;
3527
3528 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
3529 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3530 "0129 Xmit ELS RJT x%x response tag x%x "
3531 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3532 "rpi x%x\n",
3533 rejectError, elsiocb->iotag,
3534 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3535 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3536 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3537 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3538 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3539
3540 phba->fc_stat.elsXmitLSRJT++;
3541 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3542 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3543
3544 if (rc == IOCB_ERROR) {
3545 lpfc_els_free_iocb(phba, elsiocb);
3546 return 1;
3547 }
3548 return 0;
3549 }
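/*
 * Example usage (illustrative only): callers build a struct ls_rjt with the
 * reason and explanation codes and hand its lsRjtError word to this routine,
 * as the unsolicited ELS handlers below do:
 *
 *	struct ls_rjt stat;
 *
 *	stat.un.b.lsRjtRsvd0 = 0;
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 *	stat.un.b.vendorUnique = 0;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */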
3550
3551 /**
3552 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
3553 * @vport: pointer to a virtual N_Port data structure.
3554 * @oldiocb: pointer to the original lpfc command iocb data structure.
3555 * @ndlp: pointer to a node-list data structure.
3556 *
3557 * This routine prepares and issues an Accept (ACC) response to Address
3558 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3559 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3560 *
3561 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3562 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3563 * will be stored into the context1 field of the IOCB for the completion
3564 * callback function to the ADISC Accept response ELS IOCB command.
3565 *
3566 * Return code
3567 * 0 - Successfully issued acc adisc response
3568 * 1 - Failed to issue adisc acc response
3569 **/
3570 int
3571 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3572 struct lpfc_nodelist *ndlp)
3573 {
3574 struct lpfc_hba *phba = vport->phba;
3575 ADISC *ap;
3576 IOCB_t *icmd, *oldcmd;
3577 struct lpfc_iocbq *elsiocb;
3578 uint8_t *pcmd;
3579 uint16_t cmdsize;
3580 int rc;
3581
3582 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
3583 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3584 ndlp->nlp_DID, ELS_CMD_ACC);
3585 if (!elsiocb)
3586 return 1;
3587
3588 icmd = &elsiocb->iocb;
3589 oldcmd = &oldiocb->iocb;
3590 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3591
3592 /* Xmit ADISC ACC response tag <ulpIoTag> */
3593 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3594 "0130 Xmit ADISC ACC response iotag x%x xri: "
3595 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3596 elsiocb->iotag, elsiocb->iocb.ulpContext,
3597 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3598 ndlp->nlp_rpi);
3599 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3600
3601 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3602 pcmd += sizeof(uint32_t);
3603
3604 ap = (ADISC *) (pcmd);
3605 ap->hardAL_PA = phba->fc_pref_ALPA;
3606 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3607 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3608 ap->DID = be32_to_cpu(vport->fc_myDID);
3609
3610 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3611 "Issue ACC ADISC: did:x%x flg:x%x",
3612 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3613
3614 phba->fc_stat.elsXmitACC++;
3615 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3616 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3617 if (rc == IOCB_ERROR) {
3618 lpfc_els_free_iocb(phba, elsiocb);
3619 return 1;
3620 }
3621 return 0;
3622 }
3623
3624 /**
3625 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
3626 * @vport: pointer to a virtual N_Port data structure.
3627 * @oldiocb: pointer to the original lpfc command iocb data structure.
3628 * @ndlp: pointer to a node-list data structure.
3629 *
3630 * This routine prepares and issues an Accept (ACC) response to Process
3631 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3632 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3633 *
3634 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3635 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3636 * will be stored into the context1 field of the IOCB for the completion
3637 * callback function to the PRLI Accept response ELS IOCB command.
3638 *
3639 * Return code
3640 * 0 - Successfully issued acc prli response
3641 * 1 - Failed to issue acc prli response
3642 **/
3643 int
3644 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3645 struct lpfc_nodelist *ndlp)
3646 {
3647 struct lpfc_hba *phba = vport->phba;
3648 PRLI *npr;
3649 lpfc_vpd_t *vpd;
3650 IOCB_t *icmd;
3651 IOCB_t *oldcmd;
3652 struct lpfc_iocbq *elsiocb;
3653 struct lpfc_sli *psli;
3654 uint8_t *pcmd;
3655 uint16_t cmdsize;
3656 int rc;
3657
3658 psli = &phba->sli;
3659
3660 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3661 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3662 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
3663 if (!elsiocb)
3664 return 1;
3665
3666 icmd = &elsiocb->iocb;
3667 oldcmd = &oldiocb->iocb;
3668 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3669 /* Xmit PRLI ACC response tag <ulpIoTag> */
3670 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3671 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3672 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3673 elsiocb->iotag, elsiocb->iocb.ulpContext,
3674 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3675 ndlp->nlp_rpi);
3676 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3677
3678 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
3679 pcmd += sizeof(uint32_t);
3680
3681 /* For PRLI, remainder of payload is PRLI parameter page */
3682 memset(pcmd, 0, sizeof(PRLI));
3683
3684 npr = (PRLI *) pcmd;
3685 vpd = &phba->vpd;
3686 /*
3687 * If the remote port is a target and our firmware version is 3.20 or
3688 * later, set the following bits for FC-TAPE support.
3689 */
3690 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3691 (vpd->rev.feaLevelHigh >= 0x02)) {
3692 npr->ConfmComplAllowed = 1;
3693 npr->Retry = 1;
3694 npr->TaskRetryIdReq = 1;
3695 }
3696
3697 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3698 npr->estabImagePair = 1;
3699 npr->readXferRdyDis = 1;
3700 npr->ConfmComplAllowed = 1;
3701
3702 npr->prliType = PRLI_FCP_TYPE;
3703 npr->initiatorFunc = 1;
3704
3705 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3706 "Issue ACC PRLI: did:x%x flg:x%x",
3707 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3708
3709 phba->fc_stat.elsXmitACC++;
3710 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3711
3712 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3713 if (rc == IOCB_ERROR) {
3714 lpfc_els_free_iocb(phba, elsiocb);
3715 return 1;
3716 }
3717 return 0;
3718 }
3719
3720 /**
3721 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
3722 * @vport: pointer to a virtual N_Port data structure.
3723 * @format: rnid command format.
3724 * @oldiocb: pointer to the original lpfc command iocb data structure.
3725 * @ndlp: pointer to a node-list data structure.
3726 *
3727 * This routine issues a Request Node Identification Data (RNID) Accept
3728 * (ACC) response. It constructs the RNID ACC response command according to
3729 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3730 * issue the response. Note that this command does not need to hold the ndlp
3731 * reference count for the callback. So, the ndlp reference count taken by
3732 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3733 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3734 * there is no ndlp reference available.
3735 *
3736 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3737 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3738 * will be stored into the context1 field of the IOCB for the completion
3739 * callback function. However, for the RNID Accept Response ELS command,
3740 * this is undone later by this routine after the IOCB is allocated.
3741 *
3742 * Return code
3743 * 0 - Successfully issued acc rnid response
3744 * 1 - Failed to issue acc rnid response
3745 **/
3746 static int
3747 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3748 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3749 {
3750 struct lpfc_hba *phba = vport->phba;
3751 RNID *rn;
3752 IOCB_t *icmd, *oldcmd;
3753 struct lpfc_iocbq *elsiocb;
3754 struct lpfc_sli *psli;
3755 uint8_t *pcmd;
3756 uint16_t cmdsize;
3757 int rc;
3758
3759 psli = &phba->sli;
3760 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3761 + (2 * sizeof(struct lpfc_name));
3762 if (format)
3763 cmdsize += sizeof(RNID_TOP_DISC);
3764
3765 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3766 ndlp->nlp_DID, ELS_CMD_ACC);
3767 if (!elsiocb)
3768 return 1;
3769
3770 icmd = &elsiocb->iocb;
3771 oldcmd = &oldiocb->iocb;
3772 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3773 /* Xmit RNID ACC response tag <ulpIoTag> */
3774 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3775 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
3776 elsiocb->iotag, elsiocb->iocb.ulpContext);
3777 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3778 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3779 pcmd += sizeof(uint32_t);
3780
3781 memset(pcmd, 0, sizeof(RNID));
3782 rn = (RNID *) (pcmd);
3783 rn->Format = format;
3784 rn->CommonLen = (2 * sizeof(struct lpfc_name));
3785 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3786 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3787 switch (format) {
3788 case 0:
3789 rn->SpecificLen = 0;
3790 break;
3791 case RNID_TOPOLOGY_DISC:
3792 rn->SpecificLen = sizeof(RNID_TOP_DISC);
3793 memcpy(&rn->un.topologyDisc.portName,
3794 &vport->fc_portname, sizeof(struct lpfc_name));
3795 rn->un.topologyDisc.unitType = RNID_HBA;
3796 rn->un.topologyDisc.physPort = 0;
3797 rn->un.topologyDisc.attachedNodes = 0;
3798 break;
3799 default:
3800 rn->CommonLen = 0;
3801 rn->SpecificLen = 0;
3802 break;
3803 }
3804
3805 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3806 "Issue ACC RNID: did:x%x flg:x%x",
3807 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3808
3809 phba->fc_stat.elsXmitACC++;
3810 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3811 lpfc_nlp_put(ndlp);
3812 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3813 * it could be freed */
3814
3815 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3816 if (rc == IOCB_ERROR) {
3817 lpfc_els_free_iocb(phba, elsiocb);
3818 return 1;
3819 }
3820 return 0;
3821 }
3822
3823 /**
3824 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
3825 * @vport: pointer to a host virtual N_Port data structure.
3826 *
3827 * This routine issues Address Discover (ADISC) ELS commands to those
3828 * N_Ports which are in node port recovery state and ADISC has not been issued
3829 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
3830 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
3831 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
3832 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
3833 * be marked with the FC_NLP_MORE bit and the process of issuing the remaining
3834 * ADISC IOCBs quits, to be picked up later. On the other hand, if after walking
3835 * through all the ndlps of the @vport no ADISC IOCB was issued, the FC_NLP_MORE
3836 * bit shall be cleared from the @vport fc_flag, indicating there are no more
3837 * ADISCs to be sent.
3838 *
3839 * Return code
3840 * The number of N_Ports with adisc issued.
3841 **/
3842 int
3843 lpfc_els_disc_adisc(struct lpfc_vport *vport)
3844 {
3845 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3846 struct lpfc_nodelist *ndlp, *next_ndlp;
3847 int sentadisc = 0;
3848
3849 /* go thru NPR nodes and issue any remaining ELS ADISCs */
3850 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3851 if (!NLP_CHK_NODE_ACT(ndlp))
3852 continue;
3853 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3854 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3855 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
3856 spin_lock_irq(shost->host_lock);
3857 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3858 spin_unlock_irq(shost->host_lock);
3859 ndlp->nlp_prev_state = ndlp->nlp_state;
3860 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3861 lpfc_issue_els_adisc(vport, ndlp, 0);
3862 sentadisc++;
3863 vport->num_disc_nodes++;
3864 if (vport->num_disc_nodes >=
3865 vport->cfg_discovery_threads) {
3866 spin_lock_irq(shost->host_lock);
3867 vport->fc_flag |= FC_NLP_MORE;
3868 spin_unlock_irq(shost->host_lock);
3869 break;
3870 }
3871 }
3872 }
3873 if (sentadisc == 0) {
3874 spin_lock_irq(shost->host_lock);
3875 vport->fc_flag &= ~FC_NLP_MORE;
3876 spin_unlock_irq(shost->host_lock);
3877 }
3878 return sentadisc;
3879 }
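/*
 * Note (illustrative summary): when the discovery-thread threshold is hit
 * above, FC_NLP_MORE is left set in vport->fc_flag; the ADISC completion path
 * is then expected to call this routine again (see lpfc_more_adisc() earlier
 * in this file) until all NPR nodes marked NLP_NPR_ADISC have been walked and
 * the flag is cleared.
 */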
3880
3881 /**
3882 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
3883 * @vport: pointer to a host virtual N_Port data structure.
3884 *
3885 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
3886 * which are in node port recovery state, with a @vport. Each time an ELS
3887 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
3888 * the per @vport number of discover count (num_disc_nodes) shall be
3889 * incremented. If the num_disc_nodes reaches a pre-configured threshold
3890 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
3891 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs quits,
3892 * to be picked up later. On the other hand, if after walking through all the
3893 * ndlps of the @vport no PLOGI IOCB was issued, the FC_NLP_MORE bit shall be
3894 * cleared from the @vport fc_flag, indicating there are no more PLOGIs to
3895 * be sent.
3896 *
3897 * Return code
3898 * The number of N_Ports with plogi issued.
3899 **/
3900 int
3901 lpfc_els_disc_plogi(struct lpfc_vport *vport)
3902 {
3903 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3904 struct lpfc_nodelist *ndlp, *next_ndlp;
3905 int sentplogi = 0;
3906
3907 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
3908 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3909 if (!NLP_CHK_NODE_ACT(ndlp))
3910 continue;
3911 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3912 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3913 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
3914 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
3915 ndlp->nlp_prev_state = ndlp->nlp_state;
3916 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3917 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
3918 sentplogi++;
3919 vport->num_disc_nodes++;
3920 if (vport->num_disc_nodes >=
3921 vport->cfg_discovery_threads) {
3922 spin_lock_irq(shost->host_lock);
3923 vport->fc_flag |= FC_NLP_MORE;
3924 spin_unlock_irq(shost->host_lock);
3925 break;
3926 }
3927 }
3928 }
3929 if (sentplogi) {
3930 lpfc_set_disctmo(vport);
3931 }
3932 else {
3933 spin_lock_irq(shost->host_lock);
3934 vport->fc_flag &= ~FC_NLP_MORE;
3935 spin_unlock_irq(shost->host_lock);
3936 }
3937 return sentplogi;
3938 }
3939
3940 /**
3941 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
3942 * @vport: pointer to a host virtual N_Port data structure.
3943 *
3944 * This routine cleans up any Registration State Change Notification
3945 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
3946 * @vport, together with the host_lock, is used to prevent multiple threads
3947 * from trying to access the RSCN array on the same @vport at the same time.
3948 **/
3949 void
3950 lpfc_els_flush_rscn(struct lpfc_vport *vport)
3951 {
3952 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3953 struct lpfc_hba *phba = vport->phba;
3954 int i;
3955
3956 spin_lock_irq(shost->host_lock);
3957 if (vport->fc_rscn_flush) {
3958 /* Another thread is walking fc_rscn_id_list on this vport */
3959 spin_unlock_irq(shost->host_lock);
3960 return;
3961 }
3962 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
3963 vport->fc_rscn_flush = 1;
3964 spin_unlock_irq(shost->host_lock);
3965
3966 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
3967 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
3968 vport->fc_rscn_id_list[i] = NULL;
3969 }
3970 spin_lock_irq(shost->host_lock);
3971 vport->fc_rscn_id_cnt = 0;
3972 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
3973 spin_unlock_irq(shost->host_lock);
3974 lpfc_can_disctmo(vport);
3975 /* Indicate we are done walking this fc_rscn_id_list */
3976 vport->fc_rscn_flush = 0;
3977 }
3978
3979 /**
3980 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
3981 * @vport: pointer to a host virtual N_Port data structure.
3982 * @did: remote destination port identifier.
3983 *
3984 * This routine checks whether there is any pending Registration State
3985 * Change Notification (RSCN) to a @did on @vport.
3986 *
3987 * Return code
3988 * Non-zero - The @did matched with a pending rscn
3989 * 0 - not able to match @did with a pending rscn
3990 **/
3991 int
3992 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3993 {
3994 D_ID ns_did;
3995 D_ID rscn_did;
3996 uint32_t *lp;
3997 uint32_t payload_len, i;
3998 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3999
4000 ns_did.un.word = did;
4001
4002 /* Never match fabric nodes for RSCNs */
4003 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4004 return 0;
4005
4006 /* If we are doing a FULL RSCN rediscovery, match everything */
4007 if (vport->fc_flag & FC_RSCN_DISCOVERY)
4008 return did;
4009
4010 spin_lock_irq(shost->host_lock);
4011 if (vport->fc_rscn_flush) {
4012 /* Another thread is walking fc_rscn_id_list on this vport */
4013 spin_unlock_irq(shost->host_lock);
4014 return 0;
4015 }
4016 /* Indicate we are walking fc_rscn_id_list on this vport */
4017 vport->fc_rscn_flush = 1;
4018 spin_unlock_irq(shost->host_lock);
4019 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4020 lp = vport->fc_rscn_id_list[i]->virt;
4021 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4022 payload_len -= sizeof(uint32_t); /* take off word 0 */
4023 while (payload_len) {
4024 rscn_did.un.word = be32_to_cpu(*lp++);
4025 payload_len -= sizeof(uint32_t);
4026 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4027 case RSCN_ADDRESS_FORMAT_PORT:
4028 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4029 && (ns_did.un.b.area == rscn_did.un.b.area)
4030 && (ns_did.un.b.id == rscn_did.un.b.id))
4031 goto return_did_out;
4032 break;
4033 case RSCN_ADDRESS_FORMAT_AREA:
4034 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4035 && (ns_did.un.b.area == rscn_did.un.b.area))
4036 goto return_did_out;
4037 break;
4038 case RSCN_ADDRESS_FORMAT_DOMAIN:
4039 if (ns_did.un.b.domain == rscn_did.un.b.domain)
4040 goto return_did_out;
4041 break;
4042 case RSCN_ADDRESS_FORMAT_FABRIC:
4043 goto return_did_out;
4044 }
4045 }
4046 }
4047 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4048 vport->fc_rscn_flush = 0;
4049 return 0;
4050 return_did_out:
4051 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4052 vport->fc_rscn_flush = 0;
4053 return did;
4054 }
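/*
 * Worked example (illustrative): an AREA-format RSCN entry carrying the
 * address 0x020A00 matches any did with domain 0x02 and area 0x0A (i.e.
 * 0x020A00 through 0x020AFF), a PORT-format entry matches only the exact
 * did, and a DOMAIN-format entry matches every did within domain 0x02.
 */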
4055
4056 /**
4057 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
4058 * @vport: pointer to a host virtual N_Port data structure.
4059 *
4060 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4061 * state machine for a @vport's nodes that are with pending RSCN (Registration
4062 * State Change Notification).
4063 *
4064 * Return code
4065 * 0 - Successful (currently always returns 0)
4066 **/
4067 static int
4068 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4069 {
4070 struct lpfc_nodelist *ndlp = NULL;
4071
4072 /* Move all affected nodes by pending RSCNs to NPR state. */
4073 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4074 if (!NLP_CHK_NODE_ACT(ndlp) ||
4075 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4076 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4077 continue;
4078 lpfc_disc_state_machine(vport, ndlp, NULL,
4079 NLP_EVT_DEVICE_RECOVERY);
4080 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4081 }
4082 return 0;
4083 }
4084
4085 /**
4086 * lpfc_send_rscn_event - Send an RSCN event to management application
4087 * @vport: pointer to a host virtual N_Port data structure.
4088 * @cmdiocb: pointer to lpfc command iocb data structure.
4089 *
4090 * lpfc_send_rscn_event sends an RSCN netlink event to management
4091 * applications.
4092 */
4093 static void
4094 lpfc_send_rscn_event(struct lpfc_vport *vport,
4095 struct lpfc_iocbq *cmdiocb)
4096 {
4097 struct lpfc_dmabuf *pcmd;
4098 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4099 uint32_t *payload_ptr;
4100 uint32_t payload_len;
4101 struct lpfc_rscn_event_header *rscn_event_data;
4102
4103 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4104 payload_ptr = (uint32_t *) pcmd->virt;
4105 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4106
4107 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4108 payload_len, GFP_KERNEL);
4109 if (!rscn_event_data) {
4110 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4111 "0147 Failed to allocate memory for RSCN event\n");
4112 return;
4113 }
4114 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4115 rscn_event_data->payload_length = payload_len;
4116 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4117 payload_len);
4118
4119 fc_host_post_vendor_event(shost,
4120 fc_get_event_number(),
4121 sizeof(struct lpfc_els_event_header) + payload_len,
4122 (char *)rscn_event_data,
4123 LPFC_NL_VENDOR_ID);
4124
4125 kfree(rscn_event_data);
4126 }
4127
4128 /**
4129 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4130 * @vport: pointer to a host virtual N_Port data structure.
4131 * @cmdiocb: pointer to lpfc command iocb data structure.
4132 * @ndlp: pointer to a node-list data structure.
4133 *
4134 * This routine processes an unsolicited RSCN (Registration State Change
4135 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4136 * to invoke the fc_host_post_event() routine to the FC transport layer. If the
4137 * discovery state machine is about to begin discovery, it just accepts the
4138 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
4139 * contains N_Port IDs for other vports on this HBA, it just accepts the
4140 * RSCN and ignores it. If the state machine is in the recovery
4141 * state, the fc_rscn_id_list of this @vport is walked and the
4142 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
4143 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4144 * routine is invoked to handle the RSCN event.
4145 *
4146 * Return code
4147 * 0 - Just sent the acc response
4148 * 1 - Sent the acc response and waited for name server completion
4149 **/
4150 static int
4151 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4152 struct lpfc_nodelist *ndlp)
4153 {
4154 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4155 struct lpfc_hba *phba = vport->phba;
4156 struct lpfc_dmabuf *pcmd;
4157 uint32_t *lp, *datap;
4158 IOCB_t *icmd;
4159 uint32_t payload_len, length, nportid, *cmd;
4160 int rscn_cnt;
4161 int rscn_id = 0, hba_id = 0;
4162 int i;
4163
4164 icmd = &cmdiocb->iocb;
4165 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4166 lp = (uint32_t *) pcmd->virt;
4167
4168 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4169 payload_len -= sizeof(uint32_t); /* take off word 0 */
4170 /* RSCN received */
4171 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4172 "0214 RSCN received Data: x%x x%x x%x x%x\n",
4173 vport->fc_flag, payload_len, *lp,
4174 vport->fc_rscn_id_cnt);
4175
4176 /* Send an RSCN event to the management application */
4177 lpfc_send_rscn_event(vport, cmdiocb);
4178
4179 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4180 fc_host_post_event(shost, fc_get_event_number(),
4181 FCH_EVT_RSCN, lp[i]);
4182
4183 /* If we are about to begin discovery, just ACC the RSCN.
4184 * Discovery processing will satisfy it.
4185 */
4186 if (vport->port_state <= LPFC_NS_QRY) {
4187 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4188 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4189 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4190
4191 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4192 return 0;
4193 }
4194
4195 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4196 * just ACC and ignore it.
4197 */
4198 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4199 !(vport->cfg_peer_port_login)) {
4200 i = payload_len;
4201 datap = lp;
4202 while (i > 0) {
4203 nportid = *datap++;
4204 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4205 i -= sizeof(uint32_t);
4206 rscn_id++;
4207 if (lpfc_find_vport_by_did(phba, nportid))
4208 hba_id++;
4209 }
4210 if (rscn_id == hba_id) {
4211 /* ALL NPortIDs in RSCN are on HBA */
4212 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4213 "0219 Ignore RSCN "
4214 "Data: x%x x%x x%x x%x\n",
4215 vport->fc_flag, payload_len,
4216 *lp, vport->fc_rscn_id_cnt);
4217 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4218 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4219 ndlp->nlp_DID, vport->port_state,
4220 ndlp->nlp_flag);
4221
4222 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4223 ndlp, NULL);
4224 return 0;
4225 }
4226 }
4227
4228 spin_lock_irq(shost->host_lock);
4229 if (vport->fc_rscn_flush) {
4230 /* Another thread is walking fc_rscn_id_list on this vport */
4231 vport->fc_flag |= FC_RSCN_DISCOVERY;
4232 spin_unlock_irq(shost->host_lock);
4233 /* Send back ACC */
4234 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4235 return 0;
4236 }
4237 /* Indicate we are walking fc_rscn_id_list on this vport */
4238 vport->fc_rscn_flush = 1;
4239 spin_unlock_irq(shost->host_lock);
4240 /* Get the array count after successfully acquiring the token */
4241 rscn_cnt = vport->fc_rscn_id_cnt;
4242 /* If we are already processing an RSCN, save the received
4243 * RSCN payload buffer, cmdiocb->context2, to process later.
4244 */
4245 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4246 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4247 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4248 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4249
4250 spin_lock_irq(shost->host_lock);
4251 vport->fc_flag |= FC_RSCN_DEFERRED;
4252 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4253 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4254 vport->fc_flag |= FC_RSCN_MODE;
4255 spin_unlock_irq(shost->host_lock);
4256 if (rscn_cnt) {
4257 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4258 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4259 }
4260 if ((rscn_cnt) &&
4261 (payload_len + length <= LPFC_BPL_SIZE)) {
4262 *cmd &= ELS_CMD_MASK;
4263 *cmd |= cpu_to_be32(payload_len + length);
4264 memcpy(((uint8_t *)cmd) + length, lp,
4265 payload_len);
4266 } else {
4267 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4268 vport->fc_rscn_id_cnt++;
4269 /* If we zero cmdiocb->context2, the calling
4270 * routine will not try to free it.
4271 */
4272 cmdiocb->context2 = NULL;
4273 }
4274 /* Deferred RSCN */
4275 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4276 "0235 Deferred RSCN "
4277 "Data: x%x x%x x%x\n",
4278 vport->fc_rscn_id_cnt, vport->fc_flag,
4279 vport->port_state);
4280 } else {
4281 vport->fc_flag |= FC_RSCN_DISCOVERY;
4282 spin_unlock_irq(shost->host_lock);
4283 /* ReDiscovery RSCN */
4284 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4285 "0234 ReDiscovery RSCN "
4286 "Data: x%x x%x x%x\n",
4287 vport->fc_rscn_id_cnt, vport->fc_flag,
4288 vport->port_state);
4289 }
4290 /* Indicate we are done walking fc_rscn_id_list on this vport */
4291 vport->fc_rscn_flush = 0;
4292 /* Send back ACC */
4293 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4294 /* send RECOVERY event for ALL nodes that match RSCN payload */
4295 lpfc_rscn_recovery_check(vport);
4296 spin_lock_irq(shost->host_lock);
4297 vport->fc_flag &= ~FC_RSCN_DEFERRED;
4298 spin_unlock_irq(shost->host_lock);
4299 return 0;
4300 }
4301 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4302 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4303 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4304
4305 spin_lock_irq(shost->host_lock);
4306 vport->fc_flag |= FC_RSCN_MODE;
4307 spin_unlock_irq(shost->host_lock);
4308 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4309 /* Indicate we are done walking fc_rscn_id_list on this vport */
4310 vport->fc_rscn_flush = 0;
4311 /*
4312 * If we zero cmdiocb->context2, the calling routine will
4313 * not try to free it.
4314 */
4315 cmdiocb->context2 = NULL;
4316 lpfc_set_disctmo(vport);
4317 /* Send back ACC */
4318 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4319 /* send RECOVERY event for ALL nodes that match RSCN payload */
4320 lpfc_rscn_recovery_check(vport);
4321 return lpfc_els_handle_rscn(vport);
4322 }
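/*
 * Note on deferred RSCNs (illustrative summary of the logic above): while an
 * RSCN is already being processed, a newly received payload is appended to
 * the last buffer on fc_rscn_id_list as long as the combined length fits in
 * LPFC_BPL_SIZE; otherwise the new payload buffer itself is queued, up to
 * FC_MAX_HOLD_RSCN entries, after which FC_RSCN_DISCOVERY forces a full
 * rediscovery instead of tracking individual N_Port IDs.
 */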
4323
4324 /**
4325 * lpfc_els_handle_rscn - Handle rscn for a vport
4326 * @vport: pointer to a host virtual N_Port data structure.
4327 *
4328 * This routine handles the Registration State Change Notification
4329 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4330 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4331 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4332 * NameServer shall be issued. If CT command to the NameServer fails to be
4333 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4334 * RSCN activities with the @vport.
4335 *
4336 * Return code
4337 * 0 - Cleaned up rscn on the @vport
4338 * 1 - Wait for plogi to name server before proceeding
4339 **/
4340 int
4341 lpfc_els_handle_rscn(struct lpfc_vport *vport)
4342 {
4343 struct lpfc_nodelist *ndlp;
4344 struct lpfc_hba *phba = vport->phba;
4345
4346 /* Ignore RSCN if the port is being torn down. */
4347 if (vport->load_flag & FC_UNLOADING) {
4348 lpfc_els_flush_rscn(vport);
4349 return 0;
4350 }
4351
4352 /* Start timer for RSCN processing */
4353 lpfc_set_disctmo(vport);
4354
4355 /* RSCN processed */
4356 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4357 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4358 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4359 vport->port_state);
4360
4361 /* To process RSCN, first compare RSCN data with NameServer */
4362 vport->fc_ns_retry = 0;
4363 vport->num_disc_nodes = 0;
4364
4365 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4366 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4367 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
4368 /* Good ndlp, issue CT Request to NameServer */
4369 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
4370 /* Wait for NameServer query cmpl before we can
4371 continue */
4372 return 1;
4373 } else {
4374 /* If login to NameServer does not exist, issue one */
4375 /* Good status, issue PLOGI to NameServer */
4376 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4377 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4378 /* Wait for NameServer login cmpl before we can
4379 continue */
4380 return 1;
4381
4382 if (ndlp) {
4383 ndlp = lpfc_enable_node(vport, ndlp,
4384 NLP_STE_PLOGI_ISSUE);
4385 if (!ndlp) {
4386 lpfc_els_flush_rscn(vport);
4387 return 0;
4388 }
4389 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
4390 } else {
4391 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4392 if (!ndlp) {
4393 lpfc_els_flush_rscn(vport);
4394 return 0;
4395 }
4396 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4397 ndlp->nlp_prev_state = ndlp->nlp_state;
4398 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4399 }
4400 ndlp->nlp_type |= NLP_FABRIC;
4401 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4402 /* Wait for NameServer login cmpl before we can
4403 * continue
4404 */
4405 return 1;
4406 }
4407
4408 lpfc_els_flush_rscn(vport);
4409 return 0;
4410 }
4411
4412 /**
4413 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
4414 * @vport: pointer to a host virtual N_Port data structure.
4415 * @cmdiocb: pointer to lpfc command iocb data structure.
4416 * @ndlp: pointer to a node-list data structure.
4417 *
4418 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4419 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4420 * point topology. As an unsolicited FLOGI should not be received in a loop
4421 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4422 * lpfc_check_sparm() routine is invoked to check the parameters in the
4423 * unsolicited FLOGI. If parameter validation fails, the routine
4424 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4425 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4426 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4427 * will initiate PLOGI. The party with the higher lexicographical value shall
4428 * have higher priority (as the winning port) and will initiate PLOGI and
4429 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4430 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4431 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4432 *
4433 * Return code
4434 * 0 - Successfully processed the unsolicited flogi
4435 * 1 - Failed to process the unsolicited flogi
4436 **/
4437 static int
4438 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4439 struct lpfc_nodelist *ndlp)
4440 {
4441 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4442 struct lpfc_hba *phba = vport->phba;
4443 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4444 uint32_t *lp = (uint32_t *) pcmd->virt;
4445 IOCB_t *icmd = &cmdiocb->iocb;
4446 struct serv_parm *sp;
4447 LPFC_MBOXQ_t *mbox;
4448 struct ls_rjt stat;
4449 uint32_t cmd, did;
4450 int rc;
4451
4452 cmd = *lp++;
4453 sp = (struct serv_parm *) lp;
4454
4455 /* FLOGI received */
4456
4457 lpfc_set_disctmo(vport);
4458
4459 if (phba->fc_topology == TOPOLOGY_LOOP) {
4460 /* We should never receive a FLOGI in loop mode, ignore it */
4461 did = icmd->un.elsreq64.remoteID;
4462
4463 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4464 Loop Mode */
4465 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4466 "0113 An FLOGI ELS command x%x was "
4467 "received from DID x%x in Loop Mode\n",
4468 cmd, did);
4469 return 1;
4470 }
4471
4472 did = Fabric_DID;
4473
4474 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4475 /* For a FLOGI we accept: if our portname is greater
4476 * than the remote portname, we initiate Nport login.
4477 */
4478
4479 rc = memcmp(&vport->fc_portname, &sp->portName,
4480 sizeof(struct lpfc_name));
4481
4482 if (!rc) {
4483 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4484 if (!mbox)
4485 return 1;
4486
4487 lpfc_linkdown(phba);
4488 lpfc_init_link(phba, mbox,
4489 phba->cfg_topology,
4490 phba->cfg_link_speed);
4491 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4492 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4493 mbox->vport = vport;
4494 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4495 lpfc_set_loopback_flag(phba);
4496 if (rc == MBX_NOT_FINISHED) {
4497 mempool_free(mbox, phba->mbox_mem_pool);
4498 }
4499 return 1;
4500 } else if (rc > 0) { /* greater than */
4501 spin_lock_irq(shost->host_lock);
4502 vport->fc_flag |= FC_PT2PT_PLOGI;
4503 spin_unlock_irq(shost->host_lock);
4504 }
4505 spin_lock_irq(shost->host_lock);
4506 vport->fc_flag |= FC_PT2PT;
4507 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4508 spin_unlock_irq(shost->host_lock);
4509 } else {
4510 /* Reject this request because of invalid parameters */
4511 stat.un.b.lsRjtRsvd0 = 0;
4512 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4513 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4514 stat.un.b.vendorUnique = 0;
4515 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4516 NULL);
4517 return 1;
4518 }
4519
4520 /* Send back ACC */
4521 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
4522
4523 return 0;
4524 }
4525
4526 /**
4527 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
4528 * @vport: pointer to a host virtual N_Port data structure.
4529 * @cmdiocb: pointer to lpfc command iocb data structure.
4530 * @ndlp: pointer to a node-list data structure.
4531 *
4532 * This routine processes Request Node Identification Data (RNID) IOCB
4533 * received as an ELS unsolicited event. Only when the RNID specifies format
4534 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
4535 * will this routine invoke the lpfc_els_rsp_rnid_acc() routine to
4536 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4537 * rejected by invoking the lpfc_els_rsp_reject() routine.
4538 *
4539 * Return code
4540 * 0 - Successfully processed rnid iocb (currently always return 0)
4541 **/
4542 static int
4543 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4544 struct lpfc_nodelist *ndlp)
4545 {
4546 struct lpfc_dmabuf *pcmd;
4547 uint32_t *lp;
4548 IOCB_t *icmd;
4549 RNID *rn;
4550 struct ls_rjt stat;
4551 uint32_t cmd, did;
4552
4553 icmd = &cmdiocb->iocb;
4554 did = icmd->un.elsreq64.remoteID;
4555 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4556 lp = (uint32_t *) pcmd->virt;
4557
4558 cmd = *lp++;
4559 rn = (RNID *) lp;
4560
4561 /* RNID received */
4562
4563 switch (rn->Format) {
4564 case 0:
4565 case RNID_TOPOLOGY_DISC:
4566 /* Send back ACC */
4567 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
4568 break;
4569 default:
4570 /* Reject this request because format not supported */
4571 stat.un.b.lsRjtRsvd0 = 0;
4572 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4573 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4574 stat.un.b.vendorUnique = 0;
4575 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4576 NULL);
4577 }
4578 return 0;
4579 }
4580
4581 /**
4582 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
4583 * @vport: pointer to a host virtual N_Port data structure.
4584 * @cmdiocb: pointer to lpfc command iocb data structure.
4585 * @ndlp: pointer to a node-list data structure.
4586 *
4587 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4588 * received as an ELS unsolicited event. Currently, this function just invokes
4589 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4590 *
4591 * Return code
4592 * 0 - Successfully processed lirr iocb (currently always return 0)
4593 **/
4594 static int
4595 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4596 struct lpfc_nodelist *ndlp)
4597 {
4598 struct ls_rjt stat;
4599
4600 /* For now, unconditionally reject this command */
4601 stat.un.b.lsRjtRsvd0 = 0;
4602 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4603 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4604 stat.un.b.vendorUnique = 0;
4605 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4606 return 0;
4607 }
4608
4609 /**
4610 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4611 * @vport: pointer to a host virtual N_Port data structure.
4612 * @cmdiocb: pointer to lpfc command iocb data structure.
4613 * @ndlp: pointer to a node-list data structure.
4614 *
4615 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4616 * received as an ELS unsolicited event. A request to RRQ shall only
4617 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4618 * Nx_Port N_Port_ID of the target Exchange is the same as the
4619 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4620 * not accepted, an LS_RJT with reason code "Unable to perform
4621 * command request" and reason code explanation "Invalid Originator
4622 * S_ID" shall be returned. For now, we just unconditionally accept
4623 * RRQ from the target.
4624 **/
4625 static void
4626 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4627 struct lpfc_nodelist *ndlp)
4628 {
4629 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4630 }
4631
4632 /**
4633 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4634 * @phba: pointer to lpfc hba data structure.
4635 * @pmb: pointer to the driver internal queue element for mailbox command.
4636 *
4637 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4638 * mailbox command. This callback function is to actually send the Accept
4639 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4640 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4641 * mailbox command, constructs the RPS response with the link statistics
4642 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4643 * response to the RPS.
4644 *
4645 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4646 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4647 * will be stored into the context1 field of the IOCB for the completion
4648 * callback function to the RPS Accept Response ELS IOCB command.
4649 *
4650 **/
4651 static void
4652 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4653 {
4654 MAILBOX_t *mb;
4655 IOCB_t *icmd;
4656 RPS_RSP *rps_rsp;
4657 uint8_t *pcmd;
4658 struct lpfc_iocbq *elsiocb;
4659 struct lpfc_nodelist *ndlp;
4660 uint16_t xri, status;
4661 uint32_t cmdsize;
4662
4663 mb = &pmb->u.mb;
4664
4665 ndlp = (struct lpfc_nodelist *) pmb->context2;
4666 xri = (uint16_t) ((unsigned long)(pmb->context1));
4667 pmb->context1 = NULL;
4668 pmb->context2 = NULL;
4669
4670 if (mb->mbxStatus) {
4671 mempool_free(pmb, phba->mbox_mem_pool);
4672 return;
4673 }
4674
4675 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
4676 mempool_free(pmb, phba->mbox_mem_pool);
4677 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
4678 lpfc_max_els_tries, ndlp,
4679 ndlp->nlp_DID, ELS_CMD_ACC);
4680
4681 /* Decrement the ndlp reference count from previous mbox command */
4682 lpfc_nlp_put(ndlp);
4683
4684 if (!elsiocb)
4685 return;
4686
4687 icmd = &elsiocb->iocb;
4688 icmd->ulpContext = xri;
4689
4690 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4691 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4692 pcmd += sizeof(uint32_t); /* Skip past command */
4693 rps_rsp = (RPS_RSP *)pcmd;
4694
4695 if (phba->fc_topology != TOPOLOGY_LOOP)
4696 status = 0x10;
4697 else
4698 status = 0x8;
4699 if (phba->pport->fc_flag & FC_FABRIC)
4700 status |= 0x4;
4701
4702 rps_rsp->rsvd1 = 0;
4703 rps_rsp->portStatus = cpu_to_be16(status);
4704 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
4705 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
4706 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
4707 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
4708 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
4709 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
4710 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
4711 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
4712 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
4713 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4714 elsiocb->iotag, elsiocb->iocb.ulpContext,
4715 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4716 ndlp->nlp_rpi);
4717 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4718 phba->fc_stat.elsXmitACC++;
4719 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4720 lpfc_els_free_iocb(phba, elsiocb);
4721 return;
4722 }
4723
4724 /**
4725 * lpfc_els_rcv_rps - Process an unsolicited rps iocb
4726 * @vport: pointer to a host virtual N_Port data structure.
4727 * @cmdiocb: pointer to lpfc command iocb data structure.
4728 * @ndlp: pointer to a node-list data structure.
4729 *
4730 * This routine processes Read Port Status (RPS) IOCB received as an
4731 * ELS unsolicited event. It first checks the remote port state. If the
4732 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4733 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
4734 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
4735 * for reading the HBA link statistics. It is for the callback function,
4736 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
4737 * to actually sending out RPS Accept (ACC) response.
4738 *
4739 * Return codes
4740 * 0 - Successfully processed rps iocb (currently always return 0)
4741 **/
4742 static int
4743 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4744 struct lpfc_nodelist *ndlp)
4745 {
4746 struct lpfc_hba *phba = vport->phba;
4747 uint32_t *lp;
4748 uint8_t flag;
4749 LPFC_MBOXQ_t *mbox;
4750 struct lpfc_dmabuf *pcmd;
4751 RPS *rps;
4752 struct ls_rjt stat;
4753
4754 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4755 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
4756 /* reject the unsolicited RPS request and done with it */
4757 goto reject_out;
4758
4759 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4760 lp = (uint32_t *) pcmd->virt;
4761 flag = (be32_to_cpu(*lp++) & 0xf);
4762 rps = (RPS *) lp;
4763
4764 if ((flag == 0) ||
4765 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
4766 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
4767 sizeof(struct lpfc_name)) == 0))) {
4768
4769 printk("Fix me....\n");
4770 dump_stack();
4771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
4772 if (mbox) {
4773 lpfc_read_lnk_stat(phba, mbox);
4774 mbox->context1 =
4775 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
4776 mbox->context2 = lpfc_nlp_get(ndlp);
4777 mbox->vport = vport;
4778 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
4779 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4780 != MBX_NOT_FINISHED)
4781 /* Mbox completion will send ELS Response */
4782 return 0;
4783 /* Decrement reference count used for the failed mbox
4784 * command.
4785 */
4786 lpfc_nlp_put(ndlp);
4787 mempool_free(mbox, phba->mbox_mem_pool);
4788 }
4789 }
4790
4791 reject_out:
4792 /* issue rejection response */
4793 stat.un.b.lsRjtRsvd0 = 0;
4794 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4795 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4796 stat.un.b.vendorUnique = 0;
4797 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4798 return 0;
4799 }
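/*
 * Note (illustrative summary): the RPS handler above does not build the ACC
 * itself. It queues MBX_READ_LNK_STAT with context1 holding the exchange XRI
 * and context2 holding an ndlp reference; lpfc_els_rsp_rps_acc() then runs at
 * mailbox completion, copies the link statistics into an RPS_RSP payload and
 * transmits the ACC on that XRI.
 */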
4800
4801 /**
4802 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
4803 * @vport: pointer to a host virtual N_Port data structure.
4804 * @cmdsize: size of the ELS command.
4805 * @oldiocb: pointer to the original lpfc command iocb data structure.
4806 * @ndlp: pointer to a node-list data structure.
4807 *
4808 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
4809 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
4810 *
4811 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4812 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4813 * will be stored into the context1 field of the IOCB for the completion
4814 * callback function to the RPL Accept Response ELS command.
4815 *
4816 * Return code
4817 * 0 - Successfully issued ACC RPL ELS command
4818 * 1 - Failed to issue ACC RPL ELS command
4819 **/
4820 static int
4821 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4822 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4823 {
4824 struct lpfc_hba *phba = vport->phba;
4825 IOCB_t *icmd, *oldcmd;
4826 RPL_RSP rpl_rsp;
4827 struct lpfc_iocbq *elsiocb;
4828 uint8_t *pcmd;
4829
4830 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4831 ndlp->nlp_DID, ELS_CMD_ACC);
4832
4833 if (!elsiocb)
4834 return 1;
4835
4836 icmd = &elsiocb->iocb;
4837 oldcmd = &oldiocb->iocb;
4838 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
4839
4840 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
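/* Word 0 of the RPL ACC carries the ELS ACC command code in its first
 * byte and the payload length in its last two bytes, so write the full
 * command word first and then overlay the length halfword at offset 2.
 */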
4841 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4842 pcmd += sizeof(uint16_t);
4843 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
4844 pcmd += sizeof(uint16_t);
4845
4846 /* Setup the RPL ACC payload */
4847 rpl_rsp.listLen = be32_to_cpu(1);
4848 rpl_rsp.index = 0;
4849 rpl_rsp.port_num_blk.portNum = 0;
4850 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
4851 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
4852 sizeof(struct lpfc_name));
4853 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
4854 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
4855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4856 "0120 Xmit ELS RPL ACC response tag x%x "
4857 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4858 "rpi x%x\n",
4859 elsiocb->iotag, elsiocb->iocb.ulpContext,
4860 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4861 ndlp->nlp_rpi);
4862 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4863 phba->fc_stat.elsXmitACC++;
4864 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4865 IOCB_ERROR) {
4866 lpfc_els_free_iocb(phba, elsiocb);
4867 return 1;
4868 }
4869 return 0;
4870 }
4871
4872 /**
4873 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
4874 * @vport: pointer to a host virtual N_Port data structure.
4875 * @cmdiocb: pointer to lpfc command iocb data structure.
4876 * @ndlp: pointer to a node-list data structure.
4877 *
4878 * This routine processes Read Port List (RPL) IOCB received as an ELS
4879 * unsolicited event. It first checks the remote port state. If the remote
4880 * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
4881 * invokes the lpfc_els_rsp_reject() routine to send reject response.
4882 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
4883 * to accept the RPL.
4884 *
4885 * Return code
4886 * 0 - Successfully processed rpl iocb (currently always returns 0)
4887 **/
4888 static int
4889 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4890 struct lpfc_nodelist *ndlp)
4891 {
4892 struct lpfc_dmabuf *pcmd;
4893 uint32_t *lp;
4894 uint32_t maxsize;
4895 uint16_t cmdsize;
4896 RPL *rpl;
4897 struct ls_rjt stat;
4898
4899 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4900 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
4901 /* issue rejection response */
4902 stat.un.b.lsRjtRsvd0 = 0;
4903 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4904 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4905 stat.un.b.vendorUnique = 0;
4906 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4907 NULL);
4908 /* rejected the unsolicited RPL request and done with it */
4909 return 0;
4910 }
4911
4912 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4913 lp = (uint32_t *) pcmd->virt;
4914 rpl = (RPL *) (lp + 1);
4915
4916 maxsize = be32_to_cpu(rpl->maxsize);
4917
4918 /* We support only one port */
4919 if ((rpl->index == 0) &&
4920 ((maxsize == 0) ||
4921 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
4922 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
4923 } else {
4924 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
4925 }
4926 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
4927
4928 return 0;
4929 }
4930
4931 /**
4932 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
4933 * @vport: pointer to a virtual N_Port data structure.
4934 * @cmdiocb: pointer to lpfc command iocb data structure.
4935 * @ndlp: pointer to a node-list data structure.
4936 *
4937 * This routine processes Fibre Channel Address Resolution Protocol
4938 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
4939 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
4940 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
4941 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
4942 * remote PortName is compared against the FC PortName stored in the @vport
4943 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
4944 * compared against the FC NodeName stored in the @vport data structure.
4945 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
4946 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
4947 * invoked to send out FARP Response to the remote node. Before sending the
4948 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
4949 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
4950 * routine is invoked to log into the remote port first.
4951 *
4952 * Return code
4953 * 0 - Either the FARP Match Mode not supported or successfully processed
4954 **/
4955 static int
4956 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4957 struct lpfc_nodelist *ndlp)
4958 {
4959 struct lpfc_dmabuf *pcmd;
4960 uint32_t *lp;
4961 IOCB_t *icmd;
4962 FARP *fp;
4963 uint32_t cmd, cnt, did;
4964
4965 icmd = &cmdiocb->iocb;
4966 did = icmd->un.elsreq64.remoteID;
4967 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4968 lp = (uint32_t *) pcmd->virt;
4969
4970 cmd = *lp++;
4971 fp = (FARP *) lp;
4972 /* FARP-REQ received from DID <did> */
4973 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4974 "0601 FARP-REQ received from DID x%x\n", did);
4975 /* We will only support match on WWPN or WWNN */
4976 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
4977 return 0;
4978 }
4979
4980 cnt = 0;
4981 /* If this FARP command is searching for my portname */
4982 if (fp->Mflags & FARP_MATCH_PORT) {
4983 if (memcmp(&fp->RportName, &vport->fc_portname,
4984 sizeof(struct lpfc_name)) == 0)
4985 cnt = 1;
4986 }
4987
4988 /* If this FARP command is searching for my nodename */
4989 if (fp->Mflags & FARP_MATCH_NODE) {
4990 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
4991 sizeof(struct lpfc_name)) == 0)
4992 cnt = 1;
4993 }
4994
4995 if (cnt) {
4996 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
4997 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
4998 /* Log back into the node before sending the FARP. */
4999 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5000 ndlp->nlp_prev_state = ndlp->nlp_state;
5001 lpfc_nlp_set_state(vport, ndlp,
5002 NLP_STE_PLOGI_ISSUE);
5003 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5004 }
5005
5006 /* Send a FARP response to that node */
5007 if (fp->Rflags & FARP_REQUEST_FARPR)
5008 lpfc_issue_els_farpr(vport, did, 0);
5009 }
5010 }
5011 return 0;
5012 }
5013
5014 /**
5015 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
5016 * @vport: pointer to a host virtual N_Port data structure.
5017 * @cmdiocb: pointer to lpfc command iocb data structure.
5018 * @ndlp: pointer to a node-list data structure.
5019 *
5020 * This routine processes Fibre Channel Address Resolution Protocol
5021 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5022 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
5023 * the FARP response request.
5024 *
5025 * Return code
5026 * 0 - Successfully processed FARPR IOCB (currently always returns 0)
5027 **/
5028 static int
5029 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5030 struct lpfc_nodelist *ndlp)
5031 {
5032 struct lpfc_dmabuf *pcmd;
5033 uint32_t *lp;
5034 IOCB_t *icmd;
5035 uint32_t cmd, did;
5036
5037 icmd = &cmdiocb->iocb;
5038 did = icmd->un.elsreq64.remoteID;
5039 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5040 lp = (uint32_t *) pcmd->virt;
5041
5042 cmd = *lp++;
5043 /* FARP-RSP received from DID <did> */
5044 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5045 "0600 FARP-RSP received from DID x%x\n", did);
5046 /* ACCEPT the Farp resp request */
5047 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5048
5049 return 0;
5050 }
5051
5052 /**
5053 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
5054 * @vport: pointer to a host virtual N_Port data structure.
5055 * @cmdiocb: pointer to lpfc command iocb data structure.
5056 * @fan_ndlp: pointer to a node-list data structure.
5057 *
5058 * This routine processes a Fabric Address Notification (FAN) IOCB
5059 * command received as an ELS unsolicited event. The FAN ELS command will
5060 * only be processed on a physical port (i.e., the @vport represents the
5061 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5062 * compared against those in the phba data structure. If either of them is
5063 * different, the lpfc_initial_flogi() routine is invoked to initiate
5064 * Fabric Login (FLOGI) to the fabric and start discovery over. Otherwise,
5065 * if both are identical, the lpfc_issue_fabric_reglogin() routine
5066 * is invoked to register login to the fabric.
5067 *
5068 * Return code
5069 * 0 - Successfully processed fan iocb (currently always returns 0).
5070 **/
5071 static int
5072 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5073 struct lpfc_nodelist *fan_ndlp)
5074 {
5075 struct lpfc_hba *phba = vport->phba;
5076 uint32_t *lp;
5077 FAN *fp;
5078
5079 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5080 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5081 fp = (FAN *) ++lp;
5082 /* FAN received; Fan does not have a reply sequence */
5083 if ((vport == phba->pport) &&
5084 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5085 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
5086 sizeof(struct lpfc_name))) ||
5087 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
5088 sizeof(struct lpfc_name)))) {
5089 /* This port has switched fabrics. FLOGI is required */
5090 lpfc_initial_flogi(vport);
5091 } else {
5092 /* FAN verified - skip FLOGI */
5093 vport->fc_myDID = vport->fc_prevDID;
5094 if (phba->sli_rev < LPFC_SLI_REV4)
5095 lpfc_issue_fabric_reglogin(vport);
5096 else
5097 lpfc_issue_reg_vfi(vport);
5098 }
5099 }
5100 return 0;
5101 }
5102
5103 /**
5104 * lpfc_els_timeout - Handler function for the els timer
5105 * @ptr: holder for the timer function associated data.
5106 *
5107 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5108 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5109 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5110 * up the worker thread. The worker thread then invokes the routine
5111 * lpfc_els_timeout_handler() to work on the posted WORKER_ELS_TMO event.
5112 **/
5113 void
5114 lpfc_els_timeout(unsigned long ptr)
5115 {
5116 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5117 struct lpfc_hba *phba = vport->phba;
5118 uint32_t tmo_posted;
5119 unsigned long iflag;
5120
5121 spin_lock_irqsave(&vport->work_port_lock, iflag);
5122 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5123 if (!tmo_posted)
5124 vport->work_port_events |= WORKER_ELS_TMO;
5125 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
5126
5127 if (!tmo_posted)
5128 lpfc_worker_wake_up(phba);
5129 return;
5130 }
5131
5132 /**
5133 * lpfc_els_timeout_handler - Process an els timeout event
5134 * @vport: pointer to a virtual N_Port data structure.
5135 *
5136 * This routine is the actual handler function that processes an ELS timeout
5137 * event. It walks the ELS ring to get and abort all the IOCBs (except the
5138 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
5139 * invoking the lpfc_sli_issue_abort_iotag() routine.
5140 **/
5141 void
5142 lpfc_els_timeout_handler(struct lpfc_vport *vport)
5143 {
5144 struct lpfc_hba *phba = vport->phba;
5145 struct lpfc_sli_ring *pring;
5146 struct lpfc_iocbq *tmp_iocb, *piocb;
5147 IOCB_t *cmd = NULL;
5148 struct lpfc_dmabuf *pcmd;
5149 uint32_t els_command = 0;
5150 uint32_t timeout;
5151 uint32_t remote_ID = 0xffffffff;
5152
5153 spin_lock_irq(&phba->hbalock);
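/* Each outstanding ELS command is given up to twice the R_A_TOV value
 * before it is treated as timed out.
 */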
5154 timeout = (uint32_t)(phba->fc_ratov << 1);
5155
5156 pring = &phba->sli.ring[LPFC_ELS_RING];
5157
5158 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5159 cmd = &piocb->iocb;
5160
5161 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5162 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5163 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5164 continue;
5165
5166 if (piocb->vport != vport)
5167 continue;
5168
5169 pcmd = (struct lpfc_dmabuf *) piocb->context2;
5170 if (pcmd)
5171 els_command = *(uint32_t *) (pcmd->virt);
5172
5173 if (els_command == ELS_CMD_FARP ||
5174 els_command == ELS_CMD_FARPR ||
5175 els_command == ELS_CMD_FDISC)
5176 continue;
5177
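/* This IOCB still has driver timeout left; charge this interval
 * against it and revisit it on the next timer pop.
 */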
5178 if (piocb->drvrTimeout > 0) {
5179 if (piocb->drvrTimeout >= timeout)
5180 piocb->drvrTimeout -= timeout;
5181 else
5182 piocb->drvrTimeout = 0;
5183 continue;
5184 }
5185
5186 remote_ID = 0xffffffff;
5187 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
5188 remote_ID = cmd->un.elsreq64.remoteID;
5189 else {
5190 struct lpfc_nodelist *ndlp;
5191 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
5192 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5193 remote_ID = ndlp->nlp_DID;
5194 }
5195 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5196 "0127 ELS timeout Data: x%x x%x x%x "
5197 "x%x\n", els_command,
5198 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5199 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5200 }
5201 spin_unlock_irq(&phba->hbalock);
5202
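/* Re-arm the ELS timer only while commands remain outstanding on the
 * ELS ring.
 */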
5203 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5204 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
5205 }
5206
5207 /**
5208 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
5209 * @vport: pointer to a host virtual N_Port data structure.
5210 *
5211 * This routine is used to clean up all the outstanding ELS commands on a
5212 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5213 * routine. After that, it walks the ELS transmit queue to remove all the
5214 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5215 * the IOCBs with a non-NULL completion callback function, the callback
5216 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5217 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5218 * callback function, the IOCB will simply be released. Finally, it walks
5219 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5220 * completion queue IOCB that is associated with the @vport and is not
5221 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5222 * part of the discovery state machine) out to HBA by invoking the
5223 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
5224 * issues the abort IOCB to any such transmit completion queued IOCB, it does
5225 * not guarantee that the IOCBs have been aborted by the time it returns.
5226 **/
5227 void
5228 lpfc_els_flush_cmd(struct lpfc_vport *vport)
5229 {
5230 LIST_HEAD(completions);
5231 struct lpfc_hba *phba = vport->phba;
5232 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5233 struct lpfc_iocbq *tmp_iocb, *piocb;
5234 IOCB_t *cmd = NULL;
5235
5236 lpfc_fabric_abort_vport(vport);
5237
5238 spin_lock_irq(&phba->hbalock);
5239 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5240 cmd = &piocb->iocb;
5241
5242 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5243 continue;
5244 }
5245
5246 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5247 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5248 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5249 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5250 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5251 continue;
5252
5253 if (piocb->vport != vport)
5254 continue;
5255
5256 list_move_tail(&piocb->list, &completions);
5257 pring->txq_cnt--;
5258 }
5259
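/* IOCBs still sitting on the transmit queue were moved above and will be
 * failed locally; IOCBs already handed to the HBA must be explicitly
 * aborted below.
 */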
5260 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5261 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5262 continue;
5263 }
5264
5265 if (piocb->vport != vport)
5266 continue;
5267
5268 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5269 }
5270 spin_unlock_irq(&phba->hbalock);
5271
5272 /* Cancel all the IOCBs from the completions list */
5273 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5274 IOERR_SLI_ABORTED);
5275
5276 return;
5277 }
5278
5279 /**
5280 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
5281 * @phba: pointer to lpfc hba data structure.
5282 *
5283 * This routine is used to clean up all the outstanding ELS commands on a
5284 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
5285 * routine. After that, it walks the ELS transmit queue to remove all the
5286 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
5287 * the IOCBs that have a completion callback function, the callback
5288 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5289 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without a completion
5290 * callback function, the IOCB will simply be released. Finally,
5291 * it walks the ELS transmit completion queue to issue an abort IOCB to any
5292 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
5293 * management plane IOCBs that are not part of the discovery state machine)
5294 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
5295 **/
5296 void
5297 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
5298 {
5299 LIST_HEAD(completions);
5300 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5301 struct lpfc_iocbq *tmp_iocb, *piocb;
5302 IOCB_t *cmd = NULL;
5303
5304 lpfc_fabric_abort_hba(phba);
5305 spin_lock_irq(&phba->hbalock);
5306 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5307 cmd = &piocb->iocb;
5308 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5309 continue;
5310 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5311 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5312 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5313 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5314 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5315 continue;
5316 list_move_tail(&piocb->list, &completions);
5317 pring->txq_cnt--;
5318 }
5319 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5320 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5321 continue;
5322 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5323 }
5324 spin_unlock_irq(&phba->hbalock);
5325
5326 /* Cancel all the IOCBs from the completions list */
5327 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5328 IOERR_SLI_ABORTED);
5329
5330 return;
5331 }
5332
5333 /**
5334 * lpfc_send_els_failure_event - Posts an ELS command failure event
5335 * @phba: Pointer to hba context object.
5336 * @cmdiocbp: Pointer to command iocb which reported error.
5337 * @rspiocbp: Pointer to response iocb which reported error.
5338 *
5339 * This function sends an event when there is an ELS command
5340 * failure.
5341 **/
5342 void
5343 lpfc_send_els_failure_event(struct lpfc_hba *phba,
5344 struct lpfc_iocbq *cmdiocbp,
5345 struct lpfc_iocbq *rspiocbp)
5346 {
5347 struct lpfc_vport *vport = cmdiocbp->vport;
5348 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5349 struct lpfc_lsrjt_event lsrjt_event;
5350 struct lpfc_fabric_event_header fabric_event;
5351 struct ls_rjt stat;
5352 struct lpfc_nodelist *ndlp;
5353 uint32_t *pcmd;
5354
5355 ndlp = cmdiocbp->context1;
5356 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5357 return;
5358
5359 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
5360 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
5361 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
5362 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
5363 sizeof(struct lpfc_name));
5364 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
5365 sizeof(struct lpfc_name));
5366 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5367 cmdiocbp->context2)->virt);
5368 lsrjt_event.command = *pcmd;
5369 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5370 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5371 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
5372 fc_host_post_vendor_event(shost,
5373 fc_get_event_number(),
5374 sizeof(lsrjt_event),
5375 (char *)&lsrjt_event,
5376 LPFC_NL_VENDOR_ID);
5377 return;
5378 }
5379 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
5380 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
5381 fabric_event.event_type = FC_REG_FABRIC_EVENT;
5382 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
5383 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
5384 else
5385 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
5386 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
5387 sizeof(struct lpfc_name));
5388 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
5389 sizeof(struct lpfc_name));
5390 fc_host_post_vendor_event(shost,
5391 fc_get_event_number(),
5392 sizeof(fabric_event),
5393 (char *)&fabric_event,
5394 LPFC_NL_VENDOR_ID);
5395 return;
5396 }
5397
5398 }
5399
5400 /**
5401 * lpfc_send_els_event - Posts unsolicited els event
5402 * @vport: Pointer to vport object.
5403 * @ndlp: Pointer FC node object.
5404 * @cmd: ELS command code.
5405 *
5406 * This function posts an event when there is an incoming
5407 * unsolicited ELS command.
5408 **/
5409 static void
5410 lpfc_send_els_event(struct lpfc_vport *vport,
5411 struct lpfc_nodelist *ndlp,
5412 uint32_t *payload)
5413 {
5414 struct lpfc_els_event_header *els_data = NULL;
5415 struct lpfc_logo_event *logo_data = NULL;
5416 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5417
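/* A LOGO event carries the remote WWPN from the LOGO payload, so it uses
 * the larger lpfc_logo_event structure; all other ELS events use the
 * common event header only.
 */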
5418 if (*payload == ELS_CMD_LOGO) {
5419 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5420 if (!logo_data) {
5421 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5422 "0148 Failed to allocate memory "
5423 "for LOGO event\n");
5424 return;
5425 }
5426 els_data = &logo_data->header;
5427 } else {
5428 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5429 GFP_KERNEL);
5430 if (!els_data) {
5431 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5432 "0149 Failed to allocate memory "
5433 "for ELS event\n");
5434 return;
5435 }
5436 }
5437 els_data->event_type = FC_REG_ELS_EVENT;
5438 switch (*payload) {
5439 case ELS_CMD_PLOGI:
5440 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
5441 break;
5442 case ELS_CMD_PRLO:
5443 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
5444 break;
5445 case ELS_CMD_ADISC:
5446 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5447 break;
5448 case ELS_CMD_LOGO:
5449 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5450 /* Copy the WWPN in the LOGO payload */
5451 memcpy(logo_data->logo_wwpn, &payload[2],
5452 sizeof(struct lpfc_name));
5453 break;
5454 default:
5455 kfree(els_data);
5456 return;
5457 }
5458 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5459 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5460 if (*payload == ELS_CMD_LOGO) {
5461 fc_host_post_vendor_event(shost,
5462 fc_get_event_number(),
5463 sizeof(struct lpfc_logo_event),
5464 (char *)logo_data,
5465 LPFC_NL_VENDOR_ID);
5466 kfree(logo_data);
5467 } else {
5468 fc_host_post_vendor_event(shost,
5469 fc_get_event_number(),
5470 sizeof(struct lpfc_els_event_header),
5471 (char *)els_data,
5472 LPFC_NL_VENDOR_ID);
5473 kfree(els_data);
5474 }
5475
5476 return;
5477 }
5478
5479
5480 /**
5481 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
5482 * @phba: pointer to lpfc hba data structure.
5483 * @pring: pointer to a SLI ring.
5484 * @vport: pointer to a host virtual N_Port data structure.
5485 * @elsiocb: pointer to lpfc els command iocb data structure.
5486 *
5487 * This routine is used for processing the IOCB associated with an unsolicited
5488 * event. It first determines whether there is an existing ndlp that matches
5489 * the DID from the unsolicited IOCB. If not, it will create a new one with
5490 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
5491 * IOCB is then used to invoke the proper routine and to set up proper state
5492 * of the discovery state machine.
5493 **/
5494 static void
5495 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5496 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
5497 {
5498 struct Scsi_Host *shost;
5499 struct lpfc_nodelist *ndlp;
5500 struct ls_rjt stat;
5501 uint32_t *payload;
5502 uint32_t cmd, did, newnode, rjt_err = 0;
5503 IOCB_t *icmd = &elsiocb->iocb;
5504
5505 if (!vport || !(elsiocb->context2))
5506 goto dropit;
5507
5508 newnode = 0;
5509 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
5510 cmd = *payload;
5511 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
5512 lpfc_post_buffer(phba, pring, 1);
5513
5514 did = icmd->un.rcvels.remoteID;
5515 if (icmd->ulpStatus) {
5516 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5517 "RCV Unsol ELS: status:x%x/x%x did:x%x",
5518 icmd->ulpStatus, icmd->un.ulpWord[4], did);
5519 goto dropit;
5520 }
5521
5522 /* Check to see if link went down during discovery */
5523 if (lpfc_els_chk_latt(vport))
5524 goto dropit;
5525
5526 /* Ignore traffic received during vport shutdown. */
5527 if (vport->load_flag & FC_UNLOADING)
5528 goto dropit;
5529
5530 ndlp = lpfc_findnode_did(vport, did);
5531 if (!ndlp) {
5532 /* Cannot find existing Fabric ndlp, so allocate a new one */
5533 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5534 if (!ndlp)
5535 goto dropit;
5536
5537 lpfc_nlp_init(vport, ndlp, did);
5538 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5539 newnode = 1;
5540 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5541 ndlp->nlp_type |= NLP_FABRIC;
5542 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5543 ndlp = lpfc_enable_node(vport, ndlp,
5544 NLP_STE_UNUSED_NODE);
5545 if (!ndlp)
5546 goto dropit;
5547 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5548 newnode = 1;
5549 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5550 ndlp->nlp_type |= NLP_FABRIC;
5551 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
5552 /* This is similar to the new node path */
5553 ndlp = lpfc_nlp_get(ndlp);
5554 if (!ndlp)
5555 goto dropit;
5556 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5557 newnode = 1;
5558 }
5559
5560 phba->fc_stat.elsRcvFrame++;
5561
5562 elsiocb->context1 = lpfc_nlp_get(ndlp);
5563 elsiocb->vport = vport;
5564
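/* The RSCN command word carries extra length fields alongside the
 * command code, so strip them before dispatching on the command below.
 */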
5565 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
5566 cmd &= ELS_CMD_MASK;
5567 }
5568 /* ELS command <elsCmd> received from NPORT <did> */
5569 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5570 "0112 ELS command x%x received from NPORT x%x "
5571 "Data: x%x\n", cmd, did, vport->port_state);
5572 switch (cmd) {
5573 case ELS_CMD_PLOGI:
5574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5575 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
5576 did, vport->port_state, ndlp->nlp_flag);
5577
5578 phba->fc_stat.elsRcvPLOGI++;
5579 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5580
5581 lpfc_send_els_event(vport, ndlp, payload);
5582 if (vport->port_state < LPFC_DISC_AUTH) {
5583 if (!(phba->pport->fc_flag & FC_PT2PT) ||
5584 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
5585 rjt_err = LSRJT_UNABLE_TPC;
5586 break;
5587 }
5588 /* We get here, and drop thru, if we are PT2PT with
5589 * another NPort and the other side has initiated
5590 * the PLOGI before responding to our FLOGI.
5591 */
5592 }
5593
5594 shost = lpfc_shost_from_vport(vport);
5595 spin_lock_irq(shost->host_lock);
5596 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
5597 spin_unlock_irq(shost->host_lock);
5598
5599 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5600 NLP_EVT_RCV_PLOGI);
5601
5602 break;
5603 case ELS_CMD_FLOGI:
5604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5605 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
5606 did, vport->port_state, ndlp->nlp_flag);
5607
5608 phba->fc_stat.elsRcvFLOGI++;
5609 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
5610 if (newnode)
5611 lpfc_nlp_put(ndlp);
5612 break;
5613 case ELS_CMD_LOGO:
5614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5615 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
5616 did, vport->port_state, ndlp->nlp_flag);
5617
5618 phba->fc_stat.elsRcvLOGO++;
5619 lpfc_send_els_event(vport, ndlp, payload);
5620 if (vport->port_state < LPFC_DISC_AUTH) {
5621 rjt_err = LSRJT_UNABLE_TPC;
5622 break;
5623 }
5624 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
5625 break;
5626 case ELS_CMD_PRLO:
5627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5628 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
5629 did, vport->port_state, ndlp->nlp_flag);
5630
5631 phba->fc_stat.elsRcvPRLO++;
5632 lpfc_send_els_event(vport, ndlp, payload);
5633 if (vport->port_state < LPFC_DISC_AUTH) {
5634 rjt_err = LSRJT_UNABLE_TPC;
5635 break;
5636 }
5637 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
5638 break;
5639 case ELS_CMD_RSCN:
5640 phba->fc_stat.elsRcvRSCN++;
5641 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
5642 if (newnode)
5643 lpfc_nlp_put(ndlp);
5644 break;
5645 case ELS_CMD_ADISC:
5646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5647 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
5648 did, vport->port_state, ndlp->nlp_flag);
5649
5650 lpfc_send_els_event(vport, ndlp, payload);
5651 phba->fc_stat.elsRcvADISC++;
5652 if (vport->port_state < LPFC_DISC_AUTH) {
5653 rjt_err = LSRJT_UNABLE_TPC;
5654 break;
5655 }
5656 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5657 NLP_EVT_RCV_ADISC);
5658 break;
5659 case ELS_CMD_PDISC:
5660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5661 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
5662 did, vport->port_state, ndlp->nlp_flag);
5663
5664 phba->fc_stat.elsRcvPDISC++;
5665 if (vport->port_state < LPFC_DISC_AUTH) {
5666 rjt_err = LSRJT_UNABLE_TPC;
5667 break;
5668 }
5669 lpfc_disc_state_machine(vport, ndlp, elsiocb,
5670 NLP_EVT_RCV_PDISC);
5671 break;
5672 case ELS_CMD_FARPR:
5673 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5674 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
5675 did, vport->port_state, ndlp->nlp_flag);
5676
5677 phba->fc_stat.elsRcvFARPR++;
5678 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
5679 break;
5680 case ELS_CMD_FARP:
5681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5682 "RCV FARP: did:x%x/ste:x%x flg:x%x",
5683 did, vport->port_state, ndlp->nlp_flag);
5684
5685 phba->fc_stat.elsRcvFARP++;
5686 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
5687 break;
5688 case ELS_CMD_FAN:
5689 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5690 "RCV FAN: did:x%x/ste:x%x flg:x%x",
5691 did, vport->port_state, ndlp->nlp_flag);
5692
5693 phba->fc_stat.elsRcvFAN++;
5694 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
5695 break;
5696 case ELS_CMD_PRLI:
5697 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5698 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
5699 did, vport->port_state, ndlp->nlp_flag);
5700
5701 phba->fc_stat.elsRcvPRLI++;
5702 if (vport->port_state < LPFC_DISC_AUTH) {
5703 rjt_err = LSRJT_UNABLE_TPC;
5704 break;
5705 }
5706 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
5707 break;
5708 case ELS_CMD_LIRR:
5709 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5710 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
5711 did, vport->port_state, ndlp->nlp_flag);
5712
5713 phba->fc_stat.elsRcvLIRR++;
5714 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
5715 if (newnode)
5716 lpfc_nlp_put(ndlp);
5717 break;
5718 case ELS_CMD_RPS:
5719 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5720 "RCV RPS: did:x%x/ste:x%x flg:x%x",
5721 did, vport->port_state, ndlp->nlp_flag);
5722
5723 phba->fc_stat.elsRcvRPS++;
5724 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
5725 if (newnode)
5726 lpfc_nlp_put(ndlp);
5727 break;
5728 case ELS_CMD_RPL:
5729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5730 "RCV RPL: did:x%x/ste:x%x flg:x%x",
5731 did, vport->port_state, ndlp->nlp_flag);
5732
5733 phba->fc_stat.elsRcvRPL++;
5734 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
5735 if (newnode)
5736 lpfc_nlp_put(ndlp);
5737 break;
5738 case ELS_CMD_RNID:
5739 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5740 "RCV RNID: did:x%x/ste:x%x flg:x%x",
5741 did, vport->port_state, ndlp->nlp_flag);
5742
5743 phba->fc_stat.elsRcvRNID++;
5744 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
5745 if (newnode)
5746 lpfc_nlp_put(ndlp);
5747 break;
5748 case ELS_CMD_RRQ:
5749 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5750 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5751 did, vport->port_state, ndlp->nlp_flag);
5752
5753 phba->fc_stat.elsRcvRRQ++;
5754 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5755 if (newnode)
5756 lpfc_nlp_put(ndlp);
5757 break;
5758 default:
5759 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5760 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
5761 cmd, did, vport->port_state);
5762
5763 /* Unsupported ELS command, reject */
5764 rjt_err = LSRJT_INVALID_CMD;
5765
5766 /* Unknown ELS command <elsCmd> received from NPORT <did> */
5767 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5768 "0115 Unknown ELS command x%x "
5769 "received from NPORT x%x\n", cmd, did);
5770 if (newnode)
5771 lpfc_nlp_put(ndlp);
5772 break;
5773 }
5774
5775 /* check if we need to LS_RJT the received ELS cmd */
5776 if (rjt_err) {
5777 memset(&stat, 0, sizeof(stat));
5778 stat.un.b.lsRjtRsnCode = rjt_err;
5779 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
5780 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
5781 NULL);
5782 }
5783
5784 lpfc_nlp_put(elsiocb->context1);
5785 elsiocb->context1 = NULL;
5786 return;
5787
5788 dropit:
5789 if (vport && !(vport->load_flag & FC_UNLOADING))
5790 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5791 "0111 Dropping received ELS cmd "
5792 "Data: x%x x%x x%x\n",
5793 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5794 phba->fc_stat.elsRcvDrop++;
5795 }
5796
5797 /**
5798 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5799 * @phba: pointer to lpfc hba data structure.
5800 * @vpi: host virtual N_Port identifier.
5801 *
5802 * This routine finds a vport on a HBA (referred by @phba) through a
5803 * @vpi. The function walks the HBA's vport list and returns the address
5804 * of the vport with the matching @vpi.
5805 *
5806 * Return code
5807 * NULL - No vport with the matching @vpi found
5808 * Otherwise - Address to the vport with the matching @vpi.
5809 **/
5810 struct lpfc_vport *
5811 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5812 {
5813 struct lpfc_vport *vport;
5814 unsigned long flags;
5815
5816 spin_lock_irqsave(&phba->hbalock, flags);
5817 list_for_each_entry(vport, &phba->port_list, listentry) {
5818 if (vport->vpi == vpi) {
5819 spin_unlock_irqrestore(&phba->hbalock, flags);
5820 return vport;
5821 }
5822 }
5823 spin_unlock_irqrestore(&phba->hbalock, flags);
5824 return NULL;
5825 }
5826
5827 /**
5828 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
5829 * @phba: pointer to lpfc hba data structure.
5830 * @pring: pointer to a SLI ring.
5831 * @elsiocb: pointer to lpfc els iocb data structure.
5832 *
5833 * This routine is used to process an unsolicited event received from a SLI
5834 * (Service Level Interface) ring. The actual processing of the data buffer
5835 * associated with the unsolicited event is done by invoking the routine
5836 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
5837 * SLI ring on which the unsolicited event was received.
5838 **/
5839 void
5840 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5841 struct lpfc_iocbq *elsiocb)
5842 {
5843 struct lpfc_vport *vport = phba->pport;
5844 IOCB_t *icmd = &elsiocb->iocb;
5845 dma_addr_t paddr;
5846 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
5847 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
5848
5849 elsiocb->context1 = NULL;
5850 elsiocb->context2 = NULL;
5851 elsiocb->context3 = NULL;
5852
5853 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
5854 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
5855 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
5856 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
5857 phba->fc_stat.NoRcvBuf++;
5858 /* Not enough posted buffers; Try posting more buffers */
5859 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
5860 lpfc_post_buffer(phba, pring, 0);
5861 return;
5862 }
5863
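/* With NPIV enabled, the receive IOCB carries the VPI the frame was
 * addressed to; map it back to the owning vport (0xffff means the
 * physical port).
 */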
5864 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5865 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
5866 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5867 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5868 vport = phba->pport;
5869 else
5870 vport = lpfc_find_vport_by_vpid(phba,
5871 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5872 }
5873 /* If there are no BDEs associated
5874 * with this IOCB, there is nothing to do.
5875 */
5876 if (icmd->ulpBdeCount == 0)
5877 return;
5878
5879 /* type of ELS cmd is first 32bit word
5880 * in packet
5881 */
5882 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5883 elsiocb->context2 = bdeBuf1;
5884 } else {
5885 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
5886 icmd->un.cont64[0].addrLow);
5887 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
5888 paddr);
5889 }
5890
5891 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
5892 /*
5893 * The different unsolicited event handlers would tell us
5894 * if they are done with "mp" by setting context2 to NULL.
5895 */
5896 if (elsiocb->context2) {
5897 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
5898 elsiocb->context2 = NULL;
5899 }
5900
5901 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
5902 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
5903 icmd->ulpBdeCount == 2) {
5904 elsiocb->context2 = bdeBuf2;
5905 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
5906 /* free mp if we are done with it */
5907 if (elsiocb->context2) {
5908 lpfc_in_buf_free(phba, elsiocb->context2);
5909 elsiocb->context2 = NULL;
5910 }
5911 }
5912 }
5913
5914 /**
5915 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
5916 * @phba: pointer to lpfc hba data structure.
5917 * @vport: pointer to a virtual N_Port data structure.
5918 *
5919 * This routine issues a Port Login (PLOGI) to the Name Server with
5920 * State Change Request (SCR) for a @vport. This routine will create an
5921 * ndlp for the Name Server associated to the @vport if such node does
5922 * not already exist. The PLOGI to Name Server is issued by invoking the
5923 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
5924 * (FDMI) is configured for the @vport, an FDMI node will be created and
5925 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
5926 **/
5927 void
5928 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5929 {
5930 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
5931
5932 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5933 if (!ndlp) {
5934 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5935 if (!ndlp) {
5936 if (phba->fc_topology == TOPOLOGY_LOOP) {
5937 lpfc_disc_start(vport);
5938 return;
5939 }
5940 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5941 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5942 "0251 NameServer login: no memory\n");
5943 return;
5944 }
5945 lpfc_nlp_init(vport, ndlp, NameServer_DID);
5946 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5947 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5948 if (!ndlp) {
5949 if (phba->fc_topology == TOPOLOGY_LOOP) {
5950 lpfc_disc_start(vport);
5951 return;
5952 }
5953 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5954 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5955 "0348 NameServer login: node freed\n");
5956 return;
5957 }
5958 }
5959 ndlp->nlp_type |= NLP_FABRIC;
5960
5961 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5962
5963 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
5964 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
5965 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5966 "0252 Cannot issue NameServer login\n");
5967 return;
5968 }
5969
5970 if (vport->cfg_fdmi_on) {
5971 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
5972 GFP_KERNEL);
5973 if (ndlp_fdmi) {
5974 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
5975 ndlp_fdmi->nlp_type |= NLP_FABRIC;
5976 lpfc_nlp_set_state(vport, ndlp_fdmi,
5977 NLP_STE_PLOGI_ISSUE);
5978 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
5979 0);
5980 }
5981 }
5982 return;
5983 }
5984
5985 /**
5986 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
5987 * @phba: pointer to lpfc hba data structure.
5988 * @pmb: pointer to the driver internal queue element for mailbox command.
5989 *
5990 * This routine is the completion callback function to register new vport
5991 * mailbox command. If the new vport mailbox command completes successfully,
5992 * the fabric registration login shall be performed on the physical port (the
5993 * new vport created is actually the physical port, with VPI 0) or the port
5994 * login to the Name Server for State Change Request (SCR) will be performed
5995 * on a virtual port (a real virtual port, with VPI greater than 0).
5996 **/
5997 static void
5998 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5999 {
6000 struct lpfc_vport *vport = pmb->vport;
6001 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6002 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
6003 MAILBOX_t *mb = &pmb->u.mb;
6004 int rc;
6005
6006 spin_lock_irq(shost->host_lock);
6007 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6008 spin_unlock_irq(shost->host_lock);
6009
6010 if (mb->mbxStatus) {
6011 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6012 "0915 Register VPI failed: 0x%x\n",
6013 mb->mbxStatus);
6014
6015 switch (mb->mbxStatus) {
6016 case 0x11: /* unsupported feature */
6017 case 0x9603: /* max_vpi exceeded */
6018 case 0x9602: /* Link event since CLEAR_LA */
6019 /* giving up on vport registration */
6020 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6021 spin_lock_irq(shost->host_lock);
6022 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6023 spin_unlock_irq(shost->host_lock);
6024 lpfc_can_disctmo(vport);
6025 break;
6026 /* If reg_vpi fail with invalid VPI status, re-init VPI */
6027 case 0x20:
6028 spin_lock_irq(shost->host_lock);
6029 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6030 spin_unlock_irq(shost->host_lock);
6031 lpfc_init_vpi(phba, pmb, vport->vpi);
6032 pmb->vport = vport;
6033 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6034 rc = lpfc_sli_issue_mbox(phba, pmb,
6035 MBX_NOWAIT);
6036 if (rc == MBX_NOT_FINISHED) {
6037 lpfc_printf_vlog(vport,
6038 KERN_ERR, LOG_MBOX,
6039 "2732 Failed to issue INIT_VPI"
6040 " mailbox command\n");
6041 } else {
6042 lpfc_nlp_put(ndlp);
6043 return;
6044 }
6045
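/* Fall through to the default recovery path when the INIT_VPI
 * mailbox command could not be issued.
 */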
6046 default:
6047 /* Try to recover from this error */
6048 lpfc_mbx_unreg_vpi(vport);
6049 spin_lock_irq(shost->host_lock);
6050 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6051 spin_unlock_irq(shost->host_lock);
6052 if (vport->port_type == LPFC_PHYSICAL_PORT)
6053 lpfc_initial_flogi(vport);
6054 else
6055 lpfc_initial_fdisc(vport);
6056 break;
6057 }
6058 } else {
6059 spin_lock_irq(shost->host_lock);
6060 vport->vpi_state |= LPFC_VPI_REGISTERED;
6061 spin_unlock_irq(shost->host_lock);
6062 if (vport == phba->pport) {
6063 if (phba->sli_rev < LPFC_SLI_REV4)
6064 lpfc_issue_fabric_reglogin(vport);
6065 else {
6066 /*
6067 * If the physical port is instantiated using
6068 * FDISC, do not start vport discovery.
6069 */
6070 if (vport->port_state != LPFC_FDISC)
6071 lpfc_start_fdiscs(phba);
6072 lpfc_do_scr_ns_plogi(phba, vport);
6073 }
6074 } else
6075 lpfc_do_scr_ns_plogi(phba, vport);
6076 }
6077
6078 /* Now, we decrement the ndlp reference count held for this
6079 * callback function
6080 */
6081 lpfc_nlp_put(ndlp);
6082
6083 mempool_free(pmb, phba->mbox_mem_pool);
6084 return;
6085 }
6086
6087 /**
6088 * lpfc_register_new_vport - Register a new vport with a HBA
6089 * @phba: pointer to lpfc hba data structure.
6090 * @vport: pointer to a host virtual N_Port data structure.
6091 * @ndlp: pointer to a node-list data structure.
6092 *
6093 * This routine registers the @vport as a new virtual port with a HBA.
6094 * It is done through a registering vpi mailbox command.
6095 **/
6096 void
6097 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6098 struct lpfc_nodelist *ndlp)
6099 {
6100 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6101 LPFC_MBOXQ_t *mbox;
6102
6103 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6104 if (mbox) {
6105 lpfc_reg_vpi(vport, mbox);
6106 mbox->vport = vport;
6107 mbox->context2 = lpfc_nlp_get(ndlp);
6108 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
6109 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6110 == MBX_NOT_FINISHED) {
6111 /* mailbox command failed; decrement the ndlp
6112 * reference count held for this command
6113 */
6114 lpfc_nlp_put(ndlp);
6115 mempool_free(mbox, phba->mbox_mem_pool);
6116
6117 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6118 "0253 Register VPI: Can't send mbox\n");
6119 goto mbox_err_exit;
6120 }
6121 } else {
6122 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6123 "0254 Register VPI: no memory\n");
6124 goto mbox_err_exit;
6125 }
6126 return;
6127
6128 mbox_err_exit:
6129 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6130 spin_lock_irq(shost->host_lock);
6131 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6132 spin_unlock_irq(shost->host_lock);
6133 return;
6134 }
6135
6136 /**
6137 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6138 * @phba: pointer to lpfc hba data structure.
6139 *
6140 * This routine cancels the retry delay timers for all the vports.
6141 **/
6142 void
6143 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6144 {
6145 struct lpfc_vport **vports;
6146 struct lpfc_nodelist *ndlp;
6147 uint32_t link_state;
6148 int i;
6149
6150 /* Treat this failure as linkdown for all vports */
6151 link_state = phba->link_state;
6152 lpfc_linkdown(phba);
6153 phba->link_state = link_state;
6154
6155 vports = lpfc_create_vport_work_array(phba);
6156
6157 if (vports) {
6158 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6159 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6160 if (ndlp)
6161 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6162 lpfc_els_flush_cmd(vports[i]);
6163 }
6164 lpfc_destroy_vport_work_array(phba, vports);
6165 }
6166 }
6167
6168 /**
6169 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6170 * @phba: pointer to lpfc hba data structure.
6171 *
6172 * This routine aborts all pending discovery commands and
6173 * starts a timer to retry FLOGI for the physical port
6174 * discovery.
6175 **/
6176 void
6177 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6178 {
6179 struct lpfc_nodelist *ndlp;
6180 struct Scsi_Host *shost;
6181
6182 /* Cancel all the vports' retry delay timers */
6183 lpfc_cancel_all_vport_retry_delay_timer(phba);
6184
6185 /* If the fabric requires FLOGI, then re-instantiate physical login */
6186 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6187 if (!ndlp)
6188 return;
6189
6190 shost = lpfc_shost_from_vport(phba->pport);
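/* Arm the fabric node's delayed-retry timer for one second and record
 * FLOGI as the ELS command to re-issue when it fires.
 */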
6191 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6192 spin_lock_irq(shost->host_lock);
6193 ndlp->nlp_flag |= NLP_DELAY_TMO;
6194 spin_unlock_irq(shost->host_lock);
6195 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6196 phba->pport->port_state = LPFC_FLOGI;
6197 return;
6198 }
6199
6200 /**
6201 * lpfc_fabric_login_reqd - Check if FLOGI required.
6202 * @phba: pointer to lpfc hba data structure.
6203 * @cmdiocb: pointer to FDISC command iocb.
6204 * @rspiocb: pointer to FDISC response iocb.
6205 *
6206 * This routine checks if a FLOGI is required for FDISC
6207 * to succeed.
6208 **/
6209 static int
6210 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6211 struct lpfc_iocbq *cmdiocb,
6212 struct lpfc_iocbq *rspiocb)
6213 {
6214
6215 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6216 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6217 return 0;
6218 else
6219 return 1;
6220 }
6221
6222 /**
6223 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
6224 * @phba: pointer to lpfc hba data structure.
6225 * @cmdiocb: pointer to lpfc command iocb data structure.
6226 * @rspiocb: pointer to lpfc response iocb data structure.
6227 *
6228 * This routine is the completion callback function to a Fabric Discover
6229 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
6230 * single threaded, each FDISC completion callback function will reset
6231 * the discovery timer for all vports such that the timers will not get
6232 * unnecessary timeout. The function checks the FDISC IOCB status. If error
6233 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
6234 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
6235 * assigned to the vport has been changed with the completion of the FDISC
6236 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
6237 * are unregistered from the HBA, and then the lpfc_register_new_vport()
6238 * routine is invoked to register new vport with the HBA. Otherwise, the
6239 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
6240 * Server for State Change Request (SCR).
6241 **/
6242 static void
6243 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6244 struct lpfc_iocbq *rspiocb)
6245 {
6246 struct lpfc_vport *vport = cmdiocb->vport;
6247 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6248 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
6249 struct lpfc_nodelist *np;
6250 struct lpfc_nodelist *next_np;
6251 IOCB_t *irsp = &rspiocb->iocb;
6252 struct lpfc_iocbq *piocb;
6253
6254 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6255 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
6256 irsp->ulpStatus, irsp->un.ulpWord[4],
6257 vport->fc_prevDID);
6258 /* Since all FDISCs are being single threaded, we
6259 * must reset the discovery timer for ALL vports
6260 * waiting to send FDISC when one completes.
6261 */
6262 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
6263 lpfc_set_disctmo(piocb->vport);
6264 }
6265
6266 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6267 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
6268 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6269
6270 if (irsp->ulpStatus) {
6271
6272 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6273 lpfc_retry_pport_discovery(phba);
6274 goto out;
6275 }
6276
6277 /* Check for retry */
6278 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6279 goto out;
6280 /* FDISC failed */
6281 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6282 "0126 FDISC failed. (%d/%d)\n",
6283 irsp->ulpStatus, irsp->un.ulpWord[4]);
6284 goto fdisc_failed;
6285 }
6286 spin_lock_irq(shost->host_lock);
6287 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6288 vport->fc_flag |= FC_FABRIC;
6289 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6290 vport->fc_flag |= FC_PUBLIC_LOOP;
6291 spin_unlock_irq(shost->host_lock);
6292
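/* On success, the low-order bits of word 4 carry the N_Port ID that the
 * fabric assigned to this vport.
 */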
6293 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
6294 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
6295 if ((vport->fc_prevDID != vport->fc_myDID) &&
6296 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
6297 /* If our NportID changed, we need to ensure all
6298 * remaining NPORTs get unreg_login'ed so we can
6299 * issue unreg_vpi.
6300 */
6301 list_for_each_entry_safe(np, next_np,
6302 &vport->fc_nodes, nlp_listp) {
6303 if (!NLP_CHK_NODE_ACT(ndlp) ||
6304 (np->nlp_state != NLP_STE_NPR_NODE) ||
6305 !(np->nlp_flag & NLP_NPR_ADISC))
6306 continue;
6307 spin_lock_irq(shost->host_lock);
6308 np->nlp_flag &= ~NLP_NPR_ADISC;
6309 spin_unlock_irq(shost->host_lock);
6310 lpfc_unreg_rpi(vport, np);
6311 }
6312 lpfc_mbx_unreg_vpi(vport);
6313 spin_lock_irq(shost->host_lock);
6314 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6315 if (phba->sli_rev == LPFC_SLI_REV4)
6316 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6317 spin_unlock_irq(shost->host_lock);
6318 }
6319
6320 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6321 lpfc_issue_init_vpi(vport);
6322 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6323 lpfc_register_new_vport(phba, vport, ndlp);
6324 else
6325 lpfc_do_scr_ns_plogi(phba, vport);
6326 goto out;
6327 fdisc_failed:
6328 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6329 /* Cancel discovery timer */
6330 lpfc_can_disctmo(vport);
6331 lpfc_nlp_put(ndlp);
6332 out:
6333 lpfc_els_free_iocb(phba, cmdiocb);
6334 }
6335
6336 /**
6337 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
6338 * @vport: pointer to a virtual N_Port data structure.
6339 * @ndlp: pointer to a node-list data structure.
6340 * @retry: number of retries to the command IOCB.
6341 *
6342 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
6343 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
6344 * routine to issue the IOCB, which makes sure only one outstanding fabric
6345 * IOCB will be sent off HBA at any given time.
6346 *
6347 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6348 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6349 * will be stored into the context1 field of the IOCB for the completion
6350 * callback function to the FDISC ELS command.
6351 *
6352 * Return code
6353 * 0 - Successfully issued fdisc iocb command
6354 * 1 - Failed to issue fdisc iocb command
6355 **/
6356 static int
6357 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6358 uint8_t retry)
6359 {
6360 struct lpfc_hba *phba = vport->phba;
6361 IOCB_t *icmd;
6362 struct lpfc_iocbq *elsiocb;
6363 struct serv_parm *sp;
6364 uint8_t *pcmd;
6365 uint16_t cmdsize;
6366 int did = ndlp->nlp_DID;
6367 int rc;
6368
6369 vport->port_state = LPFC_FDISC;
6370 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6371 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6372 ELS_CMD_FDISC);
6373 if (!elsiocb) {
6374 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6375 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6376 "0255 Issue FDISC: no IOCB\n");
6377 return 1;
6378 }
6379
6380 icmd = &elsiocb->iocb;
6381 icmd->un.elsreq64.myID = 0;
6382 icmd->un.elsreq64.fl = 1;
6383
6384 if (phba->sli_rev == LPFC_SLI_REV4) {
6385 /* FDISC needs to be 1 for WQE VPI */
6386 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6387 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6388 /* Set the ulpContext to the vpi */
6389 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6390 } else {
6391 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6392 icmd->ulpCt_h = 1;
6393 icmd->ulpCt_l = 0;
6394 }
6395
6396 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6397 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
6398 pcmd += sizeof(uint32_t); /* CSP Word 1 */
6399 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
6400 sp = (struct serv_parm *) pcmd;
6401 /* Setup CSPs accordingly for Fabric */
6402 sp->cmn.e_d_tov = 0;
6403 sp->cmn.w2.r_a_tov = 0;
6404 sp->cls1.classValid = 0;
6405 sp->cls2.seqDelivery = 1;
6406 sp->cls3.seqDelivery = 1;
6407
6408 pcmd += sizeof(uint32_t); /* CSP Word 2 */
6409 pcmd += sizeof(uint32_t); /* CSP Word 3 */
6410 pcmd += sizeof(uint32_t); /* CSP Word 4 */
6411 pcmd += sizeof(uint32_t); /* Port Name */
6412 memcpy(pcmd, &vport->fc_portname, 8);
6413 pcmd += sizeof(uint32_t); /* Port Name word 2 */
6414 pcmd += sizeof(uint32_t); /* Node Name */
6415 memcpy(pcmd, &vport->fc_nodename, 8);
6416
6417 lpfc_set_disctmo(vport);
6418
6419 phba->fc_stat.elsXmitFDISC++;
6420 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
6421
6422 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6423 "Issue FDISC: did:x%x",
6424 did, 0, 0);
6425
6426 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
6427 if (rc == IOCB_ERROR) {
6428 lpfc_els_free_iocb(phba, elsiocb);
6429 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6430 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6431 "0256 Issue FDISC: Cannot send IOCB\n");
6432 return 1;
6433 }
6434 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6435 return 0;
6436 }
6437
6438 /**
6439 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
6440 * @phba: pointer to lpfc hba data structure.
6441 * @cmdiocb: pointer to lpfc command iocb data structure.
6442 * @rspiocb: pointer to lpfc response iocb data structure.
6443 *
6444 * This routine is the completion callback function to the issuing of a LOGO
6445 * ELS command off a vport. It frees the command IOCB and then decrements the
6446 * reference count held on the ndlp for this completion function, indicating
6447 * that the reference to the ndlp is no longer needed. Note that the
6448 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
6449 * callback function, and the additional explicit ndlp reference decrement
6450 * will trigger the actual release of the ndlp.
6451 **/
6452 static void
6453 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6454 struct lpfc_iocbq *rspiocb)
6455 {
6456 struct lpfc_vport *vport = cmdiocb->vport;
6457 IOCB_t *irsp;
6458 struct lpfc_nodelist *ndlp;
6459 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
6460
6461 irsp = &rspiocb->iocb;
6462 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6463 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
6464 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
6465
6466 lpfc_els_free_iocb(phba, cmdiocb);
6467 vport->unreg_vpi_cmpl = VPORT_ERROR;
6468
6469 /* Trigger the release of the ndlp after logo */
6470 lpfc_nlp_put(ndlp);
6471 }
6472
6473 /**
6474 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
6475 * @vport: pointer to a virtual N_Port data structure.
6476 * @ndlp: pointer to a node-list data structure.
6477 *
6478 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
6479 *
6480 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
6481 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6482 * will be stored into the context1 field of the IOCB for the completion
6483 * callback function to the LOGO ELS command.
6484 *
6485 * Return codes
6486 * 0 - Successfully issued logo off the @vport
6487 * 1 - Failed to issue logo off the @vport
6488 **/
6489 int
6490 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6491 {
6492 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6493 struct lpfc_hba *phba = vport->phba;
6494 IOCB_t *icmd;
6495 struct lpfc_iocbq *elsiocb;
6496 uint8_t *pcmd;
6497 uint16_t cmdsize;
6498
6499 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
6500 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
6501 ELS_CMD_LOGO);
6502 if (!elsiocb)
6503 return 1;
6504
6505 icmd = &elsiocb->iocb;
6506 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6507 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
6508 pcmd += sizeof(uint32_t);
6509
6510 /* Fill in LOGO payload */
6511 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
6512 pcmd += sizeof(uint32_t);
6513 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
6514
6515 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6516 "Issue LOGO npiv did:x%x flg:x%x",
6517 ndlp->nlp_DID, ndlp->nlp_flag, 0);
6518
6519 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
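/*
 * Mark a LOGO as outstanding on this node (NLP_LOGO_SND); the flag is
 * cleared again below if the iocb cannot be issued.
 */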
6520 spin_lock_irq(shost->host_lock);
6521 ndlp->nlp_flag |= NLP_LOGO_SND;
6522 spin_unlock_irq(shost->host_lock);
6523 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6524 IOCB_ERROR) {
6525 spin_lock_irq(shost->host_lock);
6526 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6527 spin_unlock_irq(shost->host_lock);
6528 lpfc_els_free_iocb(phba, elsiocb);
6529 return 1;
6530 }
6531 return 0;
6532 }
6533
6534 /**
6535 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
6536 * @ptr: holder for the timer function associated data.
6537 *
6538 * This routine is invoked by the fabric iocb block timer after
6539 * timeout. It posts the fabric iocb block timeout event by setting the
6540 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
6541 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
6542 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
6543 * posted event WORKER_FABRIC_BLOCK_TMO.
6544 **/
6545 void
6546 lpfc_fabric_block_timeout(unsigned long ptr)
6547 {
6548 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6549 unsigned long iflags;
6550 uint32_t tmo_posted;
6551
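/*
 * Post the fabric iocb block timeout event only if it is not already
 * pending, then wake the worker thread to process it.
 */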
6552 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
6553 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
6554 if (!tmo_posted)
6555 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
6556 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
6557
6558 if (!tmo_posted)
6559 lpfc_worker_wake_up(phba);
6560 return;
6561 }
6562
6563 /**
6564 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
6565 * @phba: pointer to lpfc hba data structure.
6566 *
6567 * This routine issues one fabric iocb from the driver internal list to
6568 * the HBA. It first checks whether it's ready to issue one fabric iocb to
6569 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
6570 * remove one pending fabric iocb from the driver internal list and invokes
6571 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
6572 **/
6573 static void
6574 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6575 {
6576 struct lpfc_iocbq *iocb;
6577 unsigned long iflags;
6578 int ret;
6579 IOCB_t *cmd;
6580
6581 repeat:
6582 iocb = NULL;
6583 spin_lock_irqsave(&phba->hbalock, iflags);
6584 /* Post any pending iocb to the SLI layer */
6585 if (atomic_read(&phba->fabric_iocb_count) == 0) {
6586 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
6587 list);
6588 if (iocb)
6589 /* Increment fabric iocb count to hold the position */
6590 atomic_inc(&phba->fabric_iocb_count);
6591 }
6592 spin_unlock_irqrestore(&phba->hbalock, iflags);
6593 if (iocb) {
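/*
 * Save the caller's completion routine and interpose the fabric
 * completion handler so the next queued fabric iocb can be scheduled
 * when this one completes.
 */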
6594 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6595 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6596 iocb->iocb_flag |= LPFC_IO_FABRIC;
6597
6598 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6599 "Fabric sched1: ste:x%x",
6600 iocb->vport->port_state, 0, 0);
6601
6602 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6603
6604 if (ret == IOCB_ERROR) {
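/*
 * The issue failed: restore the original completion routine,
 * complete the iocb locally as aborted, and try the next queued
 * fabric iocb.
 */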
6605 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6606 iocb->fabric_iocb_cmpl = NULL;
6607 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6608 cmd = &iocb->iocb;
6609 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6610 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6611 iocb->iocb_cmpl(phba, iocb, iocb);
6612
6613 atomic_dec(&phba->fabric_iocb_count);
6614 goto repeat;
6615 }
6616 }
6617
6618 return;
6619 }
6620
6621 /**
6622 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
6623 * @phba: pointer to lpfc hba data structure.
6624 *
6625 * This routine unblocks the issuing of fabric iocb commands. The function
6626 * will clear the fabric iocb block bit and then invoke the routine
6627 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
6628 * from the driver internal fabric iocb list.
6629 **/
6630 void
6631 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
6632 {
6633 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6634
6635 lpfc_resume_fabric_iocbs(phba);
6636 return;
6637 }
6638
6639 /**
6640 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
6641 * @phba: pointer to lpfc hba data structure.
6642 *
6643 * This routine blocks the issuing of fabric iocbs for a specified amount of
6644 * time (currently 100 ms). This is done by setting the fabric iocb block bit
6645 * and setting up a timeout timer for 100 ms. While the block bit is set, no
6646 * more fabric iocbs will be issued out of the HBA.
6647 **/
6648 static void
6649 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
6650 {
6651 int blocked;
6652
6653 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6654 /* Start a timer to unblock fabric iocbs after 100ms */
6655 if (!blocked)
6656 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);
6657
6658 return;
6659 }
6660
6661 /**
6662 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
6663 * @phba: pointer to lpfc hba data structure.
6664 * @cmdiocb: pointer to lpfc command iocb data structure.
6665 * @rspiocb: pointer to lpfc response iocb data structure.
6666 *
6667 * This routine is the callback function that is put to the fabric iocb's
6668 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
6669 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
6670 * function first restores and invokes the original iocb's callback function
6671 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
6672 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
6673 **/
6674 static void
6675 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6676 struct lpfc_iocbq *rspiocb)
6677 {
6678 struct ls_rjt stat;
6679
6680 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
6681 BUG();
6682
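/*
 * Fabric busy or temporarily-unavailable responses throttle further
 * fabric iocbs for a short interval (see lpfc_block_fabric_iocbs()).
 */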
6683 switch (rspiocb->iocb.ulpStatus) {
6684 case IOSTAT_NPORT_RJT:
6685 case IOSTAT_FABRIC_RJT:
6686 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
6687 lpfc_block_fabric_iocbs(phba);
6688 }
6689 break;
6690
6691 case IOSTAT_NPORT_BSY:
6692 case IOSTAT_FABRIC_BSY:
6693 lpfc_block_fabric_iocbs(phba);
6694 break;
6695
6696 case IOSTAT_LS_RJT:
6697 stat.un.lsRjtError =
6698 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
6699 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
6700 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
6701 lpfc_block_fabric_iocbs(phba);
6702 break;
6703 }
6704
6705 if (atomic_read(&phba->fabric_iocb_count) == 0)
6706 BUG();
6707
6708 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
6709 cmdiocb->fabric_iocb_cmpl = NULL;
6710 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
6711 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
6712
6713 atomic_dec(&phba->fabric_iocb_count);
6714 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
6715 /* Post any pending iocbs to HBA */
6716 lpfc_resume_fabric_iocbs(phba);
6717 }
6718 }
6719
6720 /**
6721 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
6722 * @phba: pointer to lpfc hba data structure.
6723 * @iocb: pointer to lpfc command iocb data structure.
6724 *
6725 * This routine is used as the top-level API for issuing a fabric iocb command
6726 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
6727 * function makes sure that only one fabric bound iocb will be outstanding at
6728 * any given time. As such, this function will first check to see whether there
6729 * is already an outstanding fabric iocb on the wire. If so, it will put the
6730 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
6731 * issued later. Otherwise, it will issue the iocb on the wire and update the
6732 * fabric iocb count to indicate that there is one fabric iocb on the wire.
6733 *
6734 * Note that this implementation can send fabric IOCBs out of order: the
6735 * "ready" boolean does not include the condition that the internal fabric
6736 * IOCB list is empty. As such, a fabric IOCB issued by this routine may
6737 * jump ahead of the fabric IOCBs already waiting on the internal list.
6739 *
6740 * Return code
6741 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
6742 * IOCB_ERROR - failed to issue fabric iocb
6743 **/
6744 static int
6745 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6746 {
6747 unsigned long iflags;
6748 int ready;
6749 int ret;
6750
6751 if (atomic_read(&phba->fabric_iocb_count) > 1)
6752 BUG();
6753
6754 spin_lock_irqsave(&phba->hbalock, iflags);
6755 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
6756 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6757
6758 if (ready)
6759 /* Increment fabric iocb count to hold the position */
6760 atomic_inc(&phba->fabric_iocb_count);
6761 spin_unlock_irqrestore(&phba->hbalock, iflags);
6762 if (ready) {
6763 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6764 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6765 iocb->iocb_flag |= LPFC_IO_FABRIC;
6766
6767 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6768 "Fabric sched2: ste:x%x",
6769 iocb->vport->port_state, 0, 0);
6770
6771 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6772
6773 if (ret == IOCB_ERROR) {
6774 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6775 iocb->fabric_iocb_cmpl = NULL;
6776 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6777 atomic_dec(&phba->fabric_iocb_count);
6778 }
6779 } else {
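/*
 * Either a fabric iocb is already outstanding or fabric iocbs are
 * blocked; queue this one on the internal list to be issued later.
 */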
6780 spin_lock_irqsave(&phba->hbalock, iflags);
6781 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
6782 spin_unlock_irqrestore(&phba->hbalock, iflags);
6783 ret = IOCB_SUCCESS;
6784 }
6785 return ret;
6786 }
6787
6788 /**
6789 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
6790 * @vport: pointer to a virtual N_Port data structure.
6791 *
6792 * This routine aborts all the IOCBs associated with a @vport from the
6793 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6794 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6795 * list, removes each IOCB associated with the @vport off the list, set the
6796 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
6797 * associated with the IOCB.
6798 **/
6799 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6800 {
6801 LIST_HEAD(completions);
6802 struct lpfc_hba *phba = vport->phba;
6803 struct lpfc_iocbq *tmp_iocb, *piocb;
6804
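/*
 * Move this vport's pending fabric iocbs onto a private list so they
 * can be cancelled after the hba lock has been dropped.
 */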
6805 spin_lock_irq(&phba->hbalock);
6806 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6807 list) {
6808
6809 if (piocb->vport != vport)
6810 continue;
6811
6812 list_move_tail(&piocb->list, &completions);
6813 }
6814 spin_unlock_irq(&phba->hbalock);
6815
6816 /* Cancel all the IOCBs from the completions list */
6817 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6818 IOERR_SLI_ABORTED);
6819 }
6820
6821 /**
6822 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
6823 * @ndlp: pointer to a node-list data structure.
6824 *
6825 * This routine aborts all the IOCBs associated with an @ndlp from the
6826 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6827 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6828 * list, removes each IOCB associated with the @ndlp off the list, sets the
6829 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6830 * associated with the IOCB.
6831 **/
6832 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6833 {
6834 LIST_HEAD(completions);
6835 struct lpfc_hba *phba = ndlp->phba;
6836 struct lpfc_iocbq *tmp_iocb, *piocb;
6837 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6838
6839 spin_lock_irq(&phba->hbalock);
6840 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6841 list) {
6842 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
6843
6844 list_move_tail(&piocb->list, &completions);
6845 }
6846 }
6847 spin_unlock_irq(&phba->hbalock);
6848
6849 /* Cancel all the IOCBs from the completions list */
6850 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6851 IOERR_SLI_ABORTED);
6852 }
6853
6854 /**
6855 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
6856 * @phba: pointer to lpfc hba data structure.
6857 *
6858 * This routine aborts all the IOCBs currently on the driver internal
6859 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
6860 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
6861 * list, removes IOCBs off the list, sets the status field to
6862 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
6863 * the IOCB.
6864 **/
6865 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6866 {
6867 LIST_HEAD(completions);
6868
6869 spin_lock_irq(&phba->hbalock);
6870 list_splice_init(&phba->fabric_iocb_list, &completions);
6871 spin_unlock_irq(&phba->hbalock);
6872
6873 /* Cancel all the IOCBs from the completions list */
6874 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6875 IOERR_SLI_ABORTED);
6876 }
6877
6878 /**
6879 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6880 * @phba: pointer to lpfc hba data structure.
6881 * @axri: pointer to the els xri abort wcqe structure.
6882 *
6883 * This routine is invoked by the worker thread to process a SLI4 slow-path
6884 * ELS aborted xri.
6885 **/
6886 void
6887 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6888 struct sli4_wcqe_xri_aborted *axri)
6889 {
6890 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6891 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6892 unsigned long iflag = 0;
6893
6894 spin_lock_irqsave(&phba->hbalock, iflag);
6895 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
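/*
 * If the aborted XRI matches an sglq on the aborted-ELS list, move
 * the sglq back to the free sgl list and mark it freed.
 */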
6896 list_for_each_entry_safe(sglq_entry, sglq_next,
6897 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6898 if (sglq_entry->sli4_xritag == xri) {
6899 list_del(&sglq_entry->list);
6900 list_add_tail(&sglq_entry->list,
6901 &phba->sli4_hba.lpfc_sgl_list);
6902 sglq_entry->state = SGL_FREED;
6903 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6904 spin_unlock_irqrestore(&phba->hbalock, iflag);
6905 return;
6906 }
6907 }
6908 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
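/*
 * The XRI was not on the aborted list; if it maps to a still-active
 * sglq, record the abort by setting its state to SGL_XRI_ABORTED.
 */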
6909 sglq_entry = __lpfc_get_active_sglq(phba, xri);
6910 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6911 spin_unlock_irqrestore(&phba->hbalock, iflag);
6912 return;
6913 }
6914 sglq_entry->state = SGL_XRI_ABORTED;
6915 spin_unlock_irqrestore(&phba->hbalock, iflag);
6916 return;
6917 }