1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44
45 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
46 struct lpfc_iocbq *);
47 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
48 struct lpfc_iocbq *);
49 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
50 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
51 struct lpfc_nodelist *ndlp, uint8_t retry);
52 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
53 struct lpfc_iocbq *iocb);
54
55 static int lpfc_max_els_tries = 3;
56
57 /**
58 * lpfc_els_chk_latt - Check host link attention event for a vport
59 * @vport: pointer to a host virtual N_Port data structure.
60 *
61 * This routine checks whether there is an outstanding host link
62 * attention event during the discovery process with the @vport. It is done
63  * by reading the HBA's Host Attention (HA) register. If there are any host
64 * link attention events during this @vport's discovery process, the @vport
65 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
66 * be issued if the link state is not already in host link cleared state,
67 * and a return code shall indicate whether the host link attention event
68 * had happened.
69 *
70 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
71  * state is in LPFC_VPORT_READY, the request for checking host link attention
72 * event will be ignored and a return code shall indicate no host link
73 * attention event had happened.
74 *
75 * Return codes
76 * 0 - no host link attention event happened
77 * 1 - host link attention event happened
78 **/
79 int
80 lpfc_els_chk_latt(struct lpfc_vport *vport)
81 {
82 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
83 struct lpfc_hba *phba = vport->phba;
84 uint32_t ha_copy;
85
86 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN ||
88 phba->sli_rev > LPFC_SLI_REV3)
89 return 0;
90
91 /* Read the HBA Host Attention Register */
92 ha_copy = readl(phba->HAregaddr);
93
94 if (!(ha_copy & HA_LATT))
95 return 0;
96
97 /* Pending Link Event during Discovery */
98 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
99 "0237 Pending Link Event during "
100 "Discovery: State x%x\n",
101 phba->pport->port_state);
102
103 /* CLEAR_LA should re-enable link attention events and
104  * we should then immediately take a LATT event. The
105 * LATT processing should call lpfc_linkdown() which
106 * will cleanup any left over in-progress discovery
107 * events.
108 */
109 spin_lock_irq(shost->host_lock);
110 vport->fc_flag |= FC_ABORT_DISCOVERY;
111 spin_unlock_irq(shost->host_lock);
112
113 if (phba->link_state != LPFC_CLEAR_LA)
114 lpfc_issue_clear_la(phba, vport);
115
116 return 1;
117 }
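/*
 * Caller pattern (illustrative note, based on the completion handlers later
 * in this file): lpfc_cmpl_els_flogi() and lpfc_cmpl_els_plogi() call
 * lpfc_els_chk_latt() first and, when it returns 1, skip further discovery
 * processing for the command (dropping or re-marking the ndlp as needed),
 * since the pending link-attention event will restart discovery anyway.
 */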
118
119 /**
120 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
121 * @vport: pointer to a host virtual N_Port data structure.
122 * @expectRsp: flag indicating whether response is expected.
123 * @cmdSize: size of the ELS command.
124 * @retry: number of retries to the command IOCB when it fails.
125 * @ndlp: pointer to a node-list data structure.
126 * @did: destination identifier.
127 * @elscmd: the ELS command code.
128 *
129  * This routine allocates a lpfc-IOCB data structure from the driver's
130  * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
131  * into the routine for the discovery state machine to issue an Extended
132  * Link Service (ELS) command. It is a generic lpfc-IOCB allocation and
133  * preparation routine used by all the discovery state machine routines;
134  * the ELS command-specific fields are set up later by the individual
135  * discovery state machine routines after this routine has allocated and
136  * prepared a generic IOCB data structure. It fills in the
137 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
138 * payload and response payload (if expected). The reference count on the
139 * ndlp is incremented by 1 and the reference to the ndlp is put into
140 * context1 of the IOCB data structure for this IOCB to hold the ndlp
141 * reference for the command's callback function to access later.
142 *
143 * Return code
144 * Pointer to the newly allocated/prepared els iocb data structure
145 * NULL - when els iocb data structure allocation/preparation failed
146 **/
147 struct lpfc_iocbq *
148 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
149 uint16_t cmdSize, uint8_t retry,
150 struct lpfc_nodelist *ndlp, uint32_t did,
151 uint32_t elscmd)
152 {
153 struct lpfc_hba *phba = vport->phba;
154 struct lpfc_iocbq *elsiocb;
155 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
156 struct ulp_bde64 *bpl;
157 IOCB_t *icmd;
158
159
160 if (!lpfc_is_link_up(phba))
161 return NULL;
162
163 /* Allocate buffer for command iocb */
164 elsiocb = lpfc_sli_get_iocbq(phba);
165
166 if (elsiocb == NULL)
167 return NULL;
168
169 /*
170 * If this command is for fabric controller and HBA running
171 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
172 */
173 if ((did == Fabric_DID) &&
174 (phba->hba_flag & HBA_FIP_SUPPORT) &&
175 ((elscmd == ELS_CMD_FLOGI) ||
176 (elscmd == ELS_CMD_FDISC) ||
177 (elscmd == ELS_CMD_LOGO)))
178 switch (elscmd) {
179 case ELS_CMD_FLOGI:
180 elsiocb->iocb_flag |=
181 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
182 & LPFC_FIP_ELS_ID_MASK);
183 break;
184 case ELS_CMD_FDISC:
185 elsiocb->iocb_flag |=
186 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
187 & LPFC_FIP_ELS_ID_MASK);
188 break;
189 case ELS_CMD_LOGO:
190 elsiocb->iocb_flag |=
191 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
192 & LPFC_FIP_ELS_ID_MASK);
193 break;
194 }
195 else
196 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
197
198 icmd = &elsiocb->iocb;
199
200 /* fill in BDEs for command */
201 /* Allocate buffer for command payload */
202 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
203 if (pcmd)
204 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
205 if (!pcmd || !pcmd->virt)
206 goto els_iocb_free_pcmb_exit;
207
208 INIT_LIST_HEAD(&pcmd->list);
209
210 /* Allocate buffer for response payload */
211 if (expectRsp) {
212 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
213 if (prsp)
214 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
215 &prsp->phys);
216 if (!prsp || !prsp->virt)
217 goto els_iocb_free_prsp_exit;
218 INIT_LIST_HEAD(&prsp->list);
219 } else
220 prsp = NULL;
221
222 /* Allocate buffer for Buffer ptr list */
223 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
224 if (pbuflist)
225 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
226 &pbuflist->phys);
227 if (!pbuflist || !pbuflist->virt)
228 goto els_iocb_free_pbuf_exit;
229
230 INIT_LIST_HEAD(&pbuflist->list);
231
232 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
233 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
234 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
235 icmd->un.elsreq64.remoteID = did; /* DID */
236 if (expectRsp) {
237 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
238 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
239 icmd->ulpTimeout = phba->fc_ratov * 2;
240 } else {
241 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
242 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
243 }
244 icmd->ulpBdeCount = 1;
245 icmd->ulpLe = 1;
246 icmd->ulpClass = CLASS3;
247
248 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
249 icmd->un.elsreq64.myID = vport->fc_myDID;
250
251 /* For ELS_REQUEST64_CR, use the VPI by default */
252 icmd->ulpContext = vport->vpi + phba->vpi_base;
253 icmd->ulpCt_h = 0;
254 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
255 if (elscmd == ELS_CMD_ECHO)
256 icmd->ulpCt_l = 0; /* context = invalid RPI */
257 else
258 icmd->ulpCt_l = 1; /* context = VPI */
259 }
260
261 bpl = (struct ulp_bde64 *) pbuflist->virt;
262 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
263 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
264 bpl->tus.f.bdeSize = cmdSize;
265 bpl->tus.f.bdeFlags = 0;
266 bpl->tus.w = le32_to_cpu(bpl->tus.w);
267
268 if (expectRsp) {
269 bpl++;
270 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
271 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
272 bpl->tus.f.bdeSize = FCELSSIZE;
273 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
274 bpl->tus.w = le32_to_cpu(bpl->tus.w);
275 }
276
277 /* prevent preparing iocb with NULL ndlp reference */
278 elsiocb->context1 = lpfc_nlp_get(ndlp);
279 if (!elsiocb->context1)
280 goto els_iocb_free_pbuf_exit;
281 elsiocb->context2 = pcmd;
282 elsiocb->context3 = pbuflist;
283 elsiocb->retry = retry;
284 elsiocb->vport = vport;
285 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
286
287 if (prsp) {
288 list_add(&prsp->list, &pcmd->list);
289 }
290 if (expectRsp) {
291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
293 "0116 Xmit ELS command x%x to remote "
294 "NPORT x%x I/O tag: x%x, port state: x%x\n",
295 elscmd, did, elsiocb->iotag,
296 vport->port_state);
297 } else {
298 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
299 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
300 "0117 Xmit ELS response x%x to remote "
301 "NPORT x%x I/O tag: x%x, size: x%x\n",
302 elscmd, ndlp->nlp_DID, elsiocb->iotag,
303 cmdSize);
304 }
305 return elsiocb;
306
307 els_iocb_free_pbuf_exit:
308 if (expectRsp)
309 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
310 kfree(pbuflist);
311
312 els_iocb_free_prsp_exit:
313 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
314 kfree(prsp);
315
316 els_iocb_free_pcmb_exit:
317 kfree(pcmd);
318 lpfc_sli_release_iocbq(phba, elsiocb);
319 return NULL;
320 }
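/*
 * Usage sketch (illustrative; it mirrors lpfc_issue_els_flogi() further down
 * in this file). A discovery routine typically prepares the generic IOCB,
 * fills in the command-specific payload through context2, sets its
 * completion handler and then issues the IOCB:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 *	*((uint32_t *) pcmd) = ELS_CMD_FLOGI;
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 */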
321
322 /**
323 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
324 * @vport: pointer to a host virtual N_Port data structure.
325 *
326 * This routine issues a fabric registration login for a @vport. An
327 * active ndlp node with Fabric_DID must already exist for this @vport.
328 * The routine invokes two mailbox commands to carry out fabric registration
329 * login through the HBA firmware: the first mailbox command requests the
330 * HBA to perform link configuration for the @vport; and the second mailbox
331 * command requests the HBA to perform the actual fabric registration login
332 * with the @vport.
333 *
334 * Return code
335 * 0 - successfully issued fabric registration login for @vport
336  *   -ENXIO - failed to issue fabric registration login for @vport
337 **/
338 int
339 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
340 {
341 struct lpfc_hba *phba = vport->phba;
342 LPFC_MBOXQ_t *mbox;
343 struct lpfc_dmabuf *mp;
344 struct lpfc_nodelist *ndlp;
345 struct serv_parm *sp;
346 int rc;
347 int err = 0;
348
349 sp = &phba->fc_fabparam;
350 ndlp = lpfc_findnode_did(vport, Fabric_DID);
351 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
352 err = 1;
353 goto fail;
354 }
355
356 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
357 if (!mbox) {
358 err = 2;
359 goto fail;
360 }
361
362 vport->port_state = LPFC_FABRIC_CFG_LINK;
363 lpfc_config_link(phba, mbox);
364 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
365 mbox->vport = vport;
366
367 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
368 if (rc == MBX_NOT_FINISHED) {
369 err = 3;
370 goto fail_free_mbox;
371 }
372
373 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
374 if (!mbox) {
375 err = 4;
376 goto fail;
377 }
378 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
379 ndlp->nlp_rpi);
380 if (rc) {
381 err = 5;
382 goto fail_free_mbox;
383 }
384
385 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
386 mbox->vport = vport;
387 /* increment the reference count on ndlp to hold reference
388 * for the callback routine.
389 */
390 mbox->context2 = lpfc_nlp_get(ndlp);
391
392 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
393 if (rc == MBX_NOT_FINISHED) {
394 err = 6;
395 goto fail_issue_reg_login;
396 }
397
398 return 0;
399
400 fail_issue_reg_login:
401 /* decrement the reference count on ndlp just incremented
402 * for the failed mbox command.
403 */
404 lpfc_nlp_put(ndlp);
405 mp = (struct lpfc_dmabuf *) mbox->context1;
406 lpfc_mbuf_free(phba, mp->virt, mp->phys);
407 kfree(mp);
408 fail_free_mbox:
409 mempool_free(mbox, phba->mbox_mem_pool);
410
411 fail:
412 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
413 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
414 "0249 Cannot issue Register Fabric login: Err %d\n", err);
415 return -ENXIO;
416 }
417
418 /**
419 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
420 * @vport: pointer to a host virtual N_Port data structure.
421 *
422 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
423 * the @vport. This mailbox command is necessary for FCoE only.
424 *
425 * Return code
426 * 0 - successfully issued REG_VFI for @vport
427 * A failure code otherwise.
428 **/
429 static int
430 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
431 {
432 struct lpfc_hba *phba = vport->phba;
433 LPFC_MBOXQ_t *mboxq;
434 struct lpfc_nodelist *ndlp;
435 struct serv_parm *sp;
436 struct lpfc_dmabuf *dmabuf;
437 int rc = 0;
438
439 sp = &phba->fc_fabparam;
440 ndlp = lpfc_findnode_did(vport, Fabric_DID);
441 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
442 rc = -ENODEV;
443 goto fail;
444 }
445
446 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
447 if (!dmabuf) {
448 rc = -ENOMEM;
449 goto fail;
450 }
451 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
452 if (!dmabuf->virt) {
453 rc = -ENOMEM;
454 goto fail_free_dmabuf;
455 }
456 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
457 if (!mboxq) {
458 rc = -ENOMEM;
459 goto fail_free_coherent;
460 }
461 vport->port_state = LPFC_FABRIC_CFG_LINK;
462 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
463 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
464 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
465 mboxq->vport = vport;
466 mboxq->context1 = dmabuf;
467 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
468 if (rc == MBX_NOT_FINISHED) {
469 rc = -ENXIO;
470 goto fail_free_mbox;
471 }
472 return 0;
473
474 fail_free_mbox:
475 mempool_free(mboxq, phba->mbox_mem_pool);
476 fail_free_coherent:
477 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
478 fail_free_dmabuf:
479 kfree(dmabuf);
480 fail:
481 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
482 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
483 "0289 Issue Register VFI failed: Err %d\n", rc);
484 return rc;
485 }
486
487 /**
488 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
489 * @vport: pointer to a host virtual N_Port data structure.
490 * @sp: pointer to service parameter data structure.
491 *
492 * This routine is called from FLOGI/FDISC completion handler functions.
493  * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
494  * Fabric node nodename has changed in the completion service parameters;
495  * otherwise it returns 0. This function also sets a flag in the vport data
496  * structure to delay N_Port discovery after the FLOGI/FDISC completion if
497  * the Clean Address bit in the FLOGI/FDISC response is cleared and the
498  * FCID, Fabric portname, or Fabric node nodename has changed.
499 *
500 * Return code
501  *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
502 * 1 - FCID or Fabric Nodename or Fabric portname is changed.
503 *
504 **/
505 static uint8_t
506 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
507 struct serv_parm *sp)
508 {
509 uint8_t fabric_param_changed = 0;
510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
511
512 if ((vport->fc_prevDID != vport->fc_myDID) ||
513 memcmp(&vport->fabric_portname, &sp->portName,
514 sizeof(struct lpfc_name)) ||
515 memcmp(&vport->fabric_nodename, &sp->nodeName,
516 sizeof(struct lpfc_name)))
517 fabric_param_changed = 1;
518
519 /*
520 * Word 1 Bit 31 in common service parameter is overloaded.
521 * Word 1 Bit 31 in FLOGI request is multiple NPort request
522 * Word 1 Bit 31 in FLOGI response is clean address bit
523 *
524 * If fabric parameter is changed and clean address bit is
525 * cleared delay nport discovery if
526 * - vport->fc_prevDID != 0 (not initial discovery) OR
527 * - lpfc_delay_discovery module parameter is set.
528 */
529 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
530 (vport->fc_prevDID || lpfc_delay_discovery)) {
531 spin_lock_irq(shost->host_lock);
532 vport->fc_flag |= FC_DISC_DELAYED;
533 spin_unlock_irq(shost->host_lock);
534 }
535
536 return fabric_param_changed;
537 }
538
539
540 /**
541 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
542 * @vport: pointer to a host virtual N_Port data structure.
543 * @ndlp: pointer to a node-list data structure.
544 * @sp: pointer to service parameter data structure.
545 * @irsp: pointer to the IOCB within the lpfc response IOCB.
546 *
547 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
548 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
549 * port in a fabric topology. It properly sets up the parameters to the @ndlp
550  * from the IOCB response. It also checks the N_Port ID newly assigned to the
551 * @vport against the previously assigned N_Port ID. If it is different from
552 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
553 * is invoked on all the remaining nodes with the @vport to unregister the
554 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
555 * is invoked to register login to the fabric.
556 *
557 * Return code
558 * 0 - Success (currently, always return 0)
559 **/
560 static int
561 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
562 struct serv_parm *sp, IOCB_t *irsp)
563 {
564 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
565 struct lpfc_hba *phba = vport->phba;
566 struct lpfc_nodelist *np;
567 struct lpfc_nodelist *next_np;
568 uint8_t fabric_param_changed;
569
570 spin_lock_irq(shost->host_lock);
571 vport->fc_flag |= FC_FABRIC;
572 spin_unlock_irq(shost->host_lock);
573
574 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
575 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
576 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
577
578 phba->fc_edtovResol = sp->cmn.edtovResolution;
579 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
580
581 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
582 spin_lock_irq(shost->host_lock);
583 vport->fc_flag |= FC_PUBLIC_LOOP;
584 spin_unlock_irq(shost->host_lock);
585 }
586
587 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
588 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
589 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
590 ndlp->nlp_class_sup = 0;
591 if (sp->cls1.classValid)
592 ndlp->nlp_class_sup |= FC_COS_CLASS1;
593 if (sp->cls2.classValid)
594 ndlp->nlp_class_sup |= FC_COS_CLASS2;
595 if (sp->cls3.classValid)
596 ndlp->nlp_class_sup |= FC_COS_CLASS3;
597 if (sp->cls4.classValid)
598 ndlp->nlp_class_sup |= FC_COS_CLASS4;
599 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
600 sp->cmn.bbRcvSizeLsb;
601
602 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
603 memcpy(&vport->fabric_portname, &sp->portName,
604 sizeof(struct lpfc_name));
605 memcpy(&vport->fabric_nodename, &sp->nodeName,
606 sizeof(struct lpfc_name));
607 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
608
609 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
610 if (sp->cmn.response_multiple_NPort) {
611 lpfc_printf_vlog(vport, KERN_WARNING,
612 LOG_ELS | LOG_VPORT,
613 "1816 FLOGI NPIV supported, "
614 "response data 0x%x\n",
615 sp->cmn.response_multiple_NPort);
616 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
617 } else {
618 			/* Because we asked f/w for NPIV it still expects us
619 			   to call reg_vnpid at least for the physical host */
620 lpfc_printf_vlog(vport, KERN_WARNING,
621 LOG_ELS | LOG_VPORT,
622 "1817 Fabric does not support NPIV "
623 "- configuring single port mode.\n");
624 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
625 }
626 }
627
628 if (fabric_param_changed &&
629 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
630
631 /* If our NportID changed, we need to ensure all
632 * remaining NPORTs get unreg_login'ed.
633 */
634 list_for_each_entry_safe(np, next_np,
635 &vport->fc_nodes, nlp_listp) {
636 if (!NLP_CHK_NODE_ACT(np))
637 continue;
638 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
639 !(np->nlp_flag & NLP_NPR_ADISC))
640 continue;
641 spin_lock_irq(shost->host_lock);
642 np->nlp_flag &= ~NLP_NPR_ADISC;
643 spin_unlock_irq(shost->host_lock);
644 lpfc_unreg_rpi(vport, np);
645 }
646 lpfc_cleanup_pending_mbox(vport);
647
648 if (phba->sli_rev == LPFC_SLI_REV4)
649 lpfc_sli4_unreg_all_rpis(vport);
650
651 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
652 lpfc_mbx_unreg_vpi(vport);
653 spin_lock_irq(shost->host_lock);
654 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
655 spin_unlock_irq(shost->host_lock);
656 }
657 /*
658 		 * If the VPI is unregistered, the driver needs to do INIT_VPI
659 * before re-registering
660 */
661 if (phba->sli_rev == LPFC_SLI_REV4) {
662 spin_lock_irq(shost->host_lock);
663 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
664 spin_unlock_irq(shost->host_lock);
665 }
666 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
667 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
668 /*
669 * Driver needs to re-reg VPI in order for f/w
670 * to update the MAC address.
671 */
672 lpfc_register_new_vport(phba, vport, ndlp);
673 return 0;
674 }
675
676 if (phba->sli_rev < LPFC_SLI_REV4) {
677 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
678 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
679 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
680 lpfc_register_new_vport(phba, vport, ndlp);
681 else
682 lpfc_issue_fabric_reglogin(vport);
683 } else {
684 ndlp->nlp_type |= NLP_FABRIC;
685 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
686 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
687 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
688 lpfc_start_fdiscs(phba);
689 lpfc_do_scr_ns_plogi(phba, vport);
690 } else if (vport->fc_flag & FC_VFI_REGISTERED)
691 lpfc_issue_init_vpi(vport);
692 else
693 lpfc_issue_reg_vfi(vport);
694 }
695 return 0;
696 }
697 /**
698 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
699 * @vport: pointer to a host virtual N_Port data structure.
700 * @ndlp: pointer to a node-list data structure.
701 * @sp: pointer to service parameter data structure.
702 *
703 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
704 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
705 * in a point-to-point topology. First, the @vport's N_Port Name is compared
706 * with the received N_Port Name: if the @vport's N_Port Name is greater than
707 * the received N_Port Name lexicographically, this node shall assign local
708 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
709 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
710 * this node shall just wait for the remote node to issue PLOGI and assign
711 * N_Port IDs.
712 *
713 * Return code
714 * 0 - Success
715 * -ENXIO - Fail
716 **/
717 static int
718 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
719 struct serv_parm *sp)
720 {
721 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
722 struct lpfc_hba *phba = vport->phba;
723 LPFC_MBOXQ_t *mbox;
724 int rc;
725
726 spin_lock_irq(shost->host_lock);
727 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
728 spin_unlock_irq(shost->host_lock);
729
730 phba->fc_edtov = FF_DEF_EDTOV;
731 phba->fc_ratov = FF_DEF_RATOV;
732 rc = memcmp(&vport->fc_portname, &sp->portName,
733 sizeof(vport->fc_portname));
734 if (rc >= 0) {
735 /* This side will initiate the PLOGI */
736 spin_lock_irq(shost->host_lock);
737 vport->fc_flag |= FC_PT2PT_PLOGI;
738 spin_unlock_irq(shost->host_lock);
739
740 /*
741 		 * N_Port ID cannot be 0, set ours to LocalID; the other
742 * side will be RemoteID.
743 */
744
745 /* not equal */
746 if (rc)
747 vport->fc_myDID = PT2PT_LocalID;
748
749 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
750 if (!mbox)
751 goto fail;
752
753 lpfc_config_link(phba, mbox);
754
755 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
756 mbox->vport = vport;
757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
758 if (rc == MBX_NOT_FINISHED) {
759 mempool_free(mbox, phba->mbox_mem_pool);
760 goto fail;
761 }
762 /* Decrement ndlp reference count indicating that ndlp can be
763 * safely released when other references to it are done.
764 */
765 lpfc_nlp_put(ndlp);
766
767 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
768 if (!ndlp) {
769 /*
770 * Cannot find existing Fabric ndlp, so allocate a
771 * new one
772 */
773 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
774 if (!ndlp)
775 goto fail;
776 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
777 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
778 ndlp = lpfc_enable_node(vport, ndlp,
779 NLP_STE_UNUSED_NODE);
780 if(!ndlp)
781 goto fail;
782 }
783
784 memcpy(&ndlp->nlp_portname, &sp->portName,
785 sizeof(struct lpfc_name));
786 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
787 sizeof(struct lpfc_name));
788 /* Set state will put ndlp onto node list if not already done */
789 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
790 spin_lock_irq(shost->host_lock);
791 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
792 spin_unlock_irq(shost->host_lock);
793 } else
794 /* This side will wait for the PLOGI, decrement ndlp reference
795 * count indicating that ndlp can be released when other
796 * references to it are done.
797 */
798 lpfc_nlp_put(ndlp);
799
800 /* If we are pt2pt with another NPort, force NPIV off! */
801 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
802
803 spin_lock_irq(shost->host_lock);
804 vport->fc_flag |= FC_PT2PT;
805 spin_unlock_irq(shost->host_lock);
806
807 /* Start discovery - this should just do CLEAR_LA */
808 lpfc_disc_start(vport);
809 return 0;
810 fail:
811 return -ENXIO;
812 }
813
814 /**
815 * lpfc_cmpl_els_flogi - Completion callback function for flogi
816 * @phba: pointer to lpfc hba data structure.
817 * @cmdiocb: pointer to lpfc command iocb data structure.
818 * @rspiocb: pointer to lpfc response iocb data structure.
819 *
820 * This routine is the top-level completion callback function for issuing
821 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
822 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
823 * retry has been made (either immediately or delayed with lpfc_els_retry()
824 * returning 1), the command IOCB will be released and function returned.
825  * returning 1), the command IOCB will be released and the function returns.
826  * If the retry attempt has been given up (possibly reaching the maximum
827 * invoked before going out after releasing the command IOCB. This will
828 * actually release the remote node (Note, lpfc_els_free_iocb() will also
829 * invoke one decrement of ndlp reference count). If no error reported in
830 * the IOCB status, the command Port ID field is used to determine whether
831 * this is a point-to-point topology or a fabric topology: if the Port ID
832 * field is assigned, it is a fabric topology; otherwise, it is a
833 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
834 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
835 * specific topology completion conditions.
836 **/
837 static void
838 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
839 struct lpfc_iocbq *rspiocb)
840 {
841 struct lpfc_vport *vport = cmdiocb->vport;
842 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
843 IOCB_t *irsp = &rspiocb->iocb;
844 struct lpfc_nodelist *ndlp = cmdiocb->context1;
845 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
846 struct serv_parm *sp;
847 uint16_t fcf_index;
848 int rc;
849
850 /* Check to see if link went down during discovery */
851 if (lpfc_els_chk_latt(vport)) {
852 /* One additional decrement on node reference count to
853 * trigger the release of the node
854 */
855 lpfc_nlp_put(ndlp);
856 goto out;
857 }
858
859 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
860 "FLOGI cmpl: status:x%x/x%x state:x%x",
861 irsp->ulpStatus, irsp->un.ulpWord[4],
862 vport->port_state);
863
864 if (irsp->ulpStatus) {
865 /*
866 * In case of FIP mode, perform roundrobin FCF failover
867 * due to new FCF discovery
868 */
869 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
870 (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
871 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
872 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
873 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
874 "2611 FLOGI failed on FCF (x%x), "
875 "status:x%x/x%x, tmo:x%x, perform "
876 "roundrobin FCF failover\n",
877 phba->fcf.current_rec.fcf_indx,
878 irsp->ulpStatus, irsp->un.ulpWord[4],
879 irsp->ulpTimeout);
880 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
881 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
882 if (rc)
883 goto out;
884 }
885
886 /* FLOGI failure */
887 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
888 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
889 irsp->ulpStatus, irsp->un.ulpWord[4],
890 irsp->ulpTimeout);
891
892 /* Check for retry */
893 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
894 goto out;
895
896 /* FLOGI failure */
897 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
898 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
899 irsp->ulpStatus, irsp->un.ulpWord[4],
900 irsp->ulpTimeout);
901
902 /* FLOGI failed, so there is no fabric */
903 spin_lock_irq(shost->host_lock);
904 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
905 spin_unlock_irq(shost->host_lock);
906
907 /* If private loop, then allow max outstanding els to be
908 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
909 * alpa map would take too long otherwise.
910 */
911 if (phba->alpa_map[0] == 0) {
912 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
913 if ((phba->sli_rev == LPFC_SLI_REV4) &&
914 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
915 (vport->fc_prevDID != vport->fc_myDID))) {
916 if (vport->fc_flag & FC_VFI_REGISTERED)
917 lpfc_sli4_unreg_all_rpis(vport);
918 lpfc_issue_reg_vfi(vport);
919 lpfc_nlp_put(ndlp);
920 goto out;
921 }
922 }
923 goto flogifail;
924 }
925 spin_lock_irq(shost->host_lock);
926 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
927 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
928 spin_unlock_irq(shost->host_lock);
929
930 /*
931 * The FLogI succeeded. Sync the data for the CPU before
932 * accessing it.
933 */
934 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
935
936 sp = prsp->virt + sizeof(uint32_t);
937
938 /* FLOGI completes successfully */
939 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
940 "0101 FLOGI completes successfully "
941 "Data: x%x x%x x%x x%x\n",
942 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
943 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
944
945 if (vport->port_state == LPFC_FLOGI) {
946 /*
947 * If Common Service Parameters indicate Nport
948 * we are point to point, if Fport we are Fabric.
949 */
950 if (sp->cmn.fPort)
951 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
952 else if (!(phba->hba_flag & HBA_FCOE_MODE))
953 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
954 else {
955 lpfc_printf_vlog(vport, KERN_ERR,
956 LOG_FIP | LOG_ELS,
957 "2831 FLOGI response with cleared Fabric "
958 "bit fcf_index 0x%x "
959 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
960 "Fabric Name "
961 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
962 phba->fcf.current_rec.fcf_indx,
963 phba->fcf.current_rec.switch_name[0],
964 phba->fcf.current_rec.switch_name[1],
965 phba->fcf.current_rec.switch_name[2],
966 phba->fcf.current_rec.switch_name[3],
967 phba->fcf.current_rec.switch_name[4],
968 phba->fcf.current_rec.switch_name[5],
969 phba->fcf.current_rec.switch_name[6],
970 phba->fcf.current_rec.switch_name[7],
971 phba->fcf.current_rec.fabric_name[0],
972 phba->fcf.current_rec.fabric_name[1],
973 phba->fcf.current_rec.fabric_name[2],
974 phba->fcf.current_rec.fabric_name[3],
975 phba->fcf.current_rec.fabric_name[4],
976 phba->fcf.current_rec.fabric_name[5],
977 phba->fcf.current_rec.fabric_name[6],
978 phba->fcf.current_rec.fabric_name[7]);
979 lpfc_nlp_put(ndlp);
980 spin_lock_irq(&phba->hbalock);
981 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
982 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
983 spin_unlock_irq(&phba->hbalock);
984 goto out;
985 }
986 if (!rc) {
987 /* Mark the FCF discovery process done */
988 if (phba->hba_flag & HBA_FIP_SUPPORT)
989 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
990 LOG_ELS,
991 "2769 FLOGI to FCF (x%x) "
992 "completed successfully\n",
993 phba->fcf.current_rec.fcf_indx);
994 spin_lock_irq(&phba->hbalock);
995 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
996 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
997 spin_unlock_irq(&phba->hbalock);
998 goto out;
999 }
1000 }
1001
1002 flogifail:
1003 lpfc_nlp_put(ndlp);
1004
1005 if (!lpfc_error_lost_link(irsp)) {
1006 /* FLOGI failed, so just use loop map to make discovery list */
1007 lpfc_disc_list_loopmap(vport);
1008
1009 /* Start discovery */
1010 lpfc_disc_start(vport);
1011 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1012 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
1013 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
1014 (phba->link_state != LPFC_CLEAR_LA)) {
1015 /* If FLOGI failed enable link interrupt. */
1016 lpfc_issue_clear_la(phba, vport);
1017 }
1018 out:
1019 lpfc_els_free_iocb(phba, cmdiocb);
1020 }
1021
1022 /**
1023 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1024 * @vport: pointer to a host virtual N_Port data structure.
1025 * @ndlp: pointer to a node-list data structure.
1026 * @retry: number of retries to the command IOCB.
1027 *
1028 * This routine issues a Fabric Login (FLOGI) Request ELS command
1029 * for a @vport. The initiator service parameters are put into the payload
1030 * of the FLOGI Request IOCB and the top-level callback function pointer
1031 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1032 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1033 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1034 *
1035 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1036 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1037 * will be stored into the context1 field of the IOCB for the completion
1038 * callback function to the FLOGI ELS command.
1039 *
1040 * Return code
1041 * 0 - successfully issued flogi iocb for @vport
1042 * 1 - failed to issue flogi iocb for @vport
1043 **/
1044 static int
1045 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1046 uint8_t retry)
1047 {
1048 struct lpfc_hba *phba = vport->phba;
1049 struct serv_parm *sp;
1050 IOCB_t *icmd;
1051 struct lpfc_iocbq *elsiocb;
1052 struct lpfc_sli_ring *pring;
1053 uint8_t *pcmd;
1054 uint16_t cmdsize;
1055 uint32_t tmo;
1056 int rc;
1057
1058 pring = &phba->sli.ring[LPFC_ELS_RING];
1059
1060 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1061 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1062 ndlp->nlp_DID, ELS_CMD_FLOGI);
1063
1064 if (!elsiocb)
1065 return 1;
1066
1067 icmd = &elsiocb->iocb;
1068 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1069
1070 /* For FLOGI request, remainder of payload is service parameters */
1071 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1072 pcmd += sizeof(uint32_t);
1073 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1074 sp = (struct serv_parm *) pcmd;
1075
1076 /* Setup CSPs accordingly for Fabric */
1077 sp->cmn.e_d_tov = 0;
1078 sp->cmn.w2.r_a_tov = 0;
1079 sp->cls1.classValid = 0;
1080 sp->cls2.seqDelivery = 1;
1081 sp->cls3.seqDelivery = 1;
1082 if (sp->cmn.fcphLow < FC_PH3)
1083 sp->cmn.fcphLow = FC_PH3;
1084 if (sp->cmn.fcphHigh < FC_PH3)
1085 sp->cmn.fcphHigh = FC_PH3;
1086
1087 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1088 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1089 LPFC_SLI_INTF_IF_TYPE_0)) {
1090 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1091 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1092 /* FLOGI needs to be 3 for WQE FCFI */
1093 /* Set the fcfi to the fcfi we registered with */
1094 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1095 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1096 sp->cmn.request_multiple_Nport = 1;
1097 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1098 icmd->ulpCt_h = 1;
1099 icmd->ulpCt_l = 0;
1100 }
1101
1102 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1103 icmd->un.elsreq64.myID = 0;
1104 icmd->un.elsreq64.fl = 1;
1105 }
1106
1107 tmo = phba->fc_ratov;
1108 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1109 lpfc_set_disctmo(vport);
1110 phba->fc_ratov = tmo;
1111
1112 phba->fc_stat.elsXmitFLOGI++;
1113 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1114
1115 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1116 "Issue FLOGI: opt:x%x",
1117 phba->sli3_options, 0, 0);
1118
1119 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1120 if (rc == IOCB_ERROR) {
1121 lpfc_els_free_iocb(phba, elsiocb);
1122 return 1;
1123 }
1124 return 0;
1125 }
1126
1127 /**
1128 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1129 * @phba: pointer to lpfc hba data structure.
1130 *
1131 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1132 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1133  * list and issues an abort IOCB command on each outstanding IOCB that
1134  * contains an active Fabric_DID ndlp. Note that this function only issues
1135 * the abort IOCB command on all the outstanding IOCBs, thus when this
1136 * function returns, it does not guarantee all the IOCBs are actually aborted.
1137 *
1138 * Return code
1139 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1140 **/
1141 int
1142 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1143 {
1144 struct lpfc_sli_ring *pring;
1145 struct lpfc_iocbq *iocb, *next_iocb;
1146 struct lpfc_nodelist *ndlp;
1147 IOCB_t *icmd;
1148
1149 /* Abort outstanding I/O on NPort <nlp_DID> */
1150 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1151 "0201 Abort outstanding I/O on NPort x%x\n",
1152 Fabric_DID);
1153
1154 pring = &phba->sli.ring[LPFC_ELS_RING];
1155
1156 /*
1157 * Check the txcmplq for an iocb that matches the nport the driver is
1158 * searching for.
1159 */
1160 spin_lock_irq(&phba->hbalock);
1161 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1162 icmd = &iocb->iocb;
1163 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
1164 icmd->un.elsreq64.bdl.ulpIoTag32) {
1165 ndlp = (struct lpfc_nodelist *)(iocb->context1);
1166 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1167 (ndlp->nlp_DID == Fabric_DID))
1168 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1169 }
1170 }
1171 spin_unlock_irq(&phba->hbalock);
1172
1173 return 0;
1174 }
1175
1176 /**
1177 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1178 * @vport: pointer to a host virtual N_Port data structure.
1179 *
1180 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1181 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1182  * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1183  * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1184 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1185 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1186 * @vport.
1187 *
1188 * Return code
1189 * 0 - failed to issue initial flogi for @vport
1190 * 1 - successfully issued initial flogi for @vport
1191 **/
1192 int
1193 lpfc_initial_flogi(struct lpfc_vport *vport)
1194 {
1195 struct lpfc_hba *phba = vport->phba;
1196 struct lpfc_nodelist *ndlp;
1197
1198 vport->port_state = LPFC_FLOGI;
1199 lpfc_set_disctmo(vport);
1200
1201 /* First look for the Fabric ndlp */
1202 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1203 if (!ndlp) {
1204 /* Cannot find existing Fabric ndlp, so allocate a new one */
1205 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1206 if (!ndlp)
1207 return 0;
1208 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1209 /* Set the node type */
1210 ndlp->nlp_type |= NLP_FABRIC;
1211 /* Put ndlp onto node list */
1212 lpfc_enqueue_node(vport, ndlp);
1213 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1214 /* re-setup ndlp without removing from node list */
1215 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1216 if (!ndlp)
1217 return 0;
1218 }
1219
1220 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1221 /* This decrement of reference count to node shall kick off
1222 * the release of the node.
1223 */
1224 lpfc_nlp_put(ndlp);
1225 return 0;
1226 }
1227 return 1;
1228 }
1229
1230 /**
1231 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1232 * @vport: pointer to a host virtual N_Port data structure.
1233 *
1234 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1235 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1236  * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1237  * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1238 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1239 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1240 * @vport.
1241 *
1242 * Return code
1243 * 0 - failed to issue initial fdisc for @vport
1244 * 1 - successfully issued initial fdisc for @vport
1245 **/
1246 int
1247 lpfc_initial_fdisc(struct lpfc_vport *vport)
1248 {
1249 struct lpfc_hba *phba = vport->phba;
1250 struct lpfc_nodelist *ndlp;
1251
1252 /* First look for the Fabric ndlp */
1253 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1254 if (!ndlp) {
1255 /* Cannot find existing Fabric ndlp, so allocate a new one */
1256 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1257 if (!ndlp)
1258 return 0;
1259 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1260 /* Put ndlp onto node list */
1261 lpfc_enqueue_node(vport, ndlp);
1262 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1263 /* re-setup ndlp without removing from node list */
1264 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1265 if (!ndlp)
1266 return 0;
1267 }
1268
1269 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1270 /* decrement node reference count to trigger the release of
1271 * the node.
1272 */
1273 lpfc_nlp_put(ndlp);
1274 return 0;
1275 }
1276 return 1;
1277 }
1278
1279 /**
1280 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1281 * @vport: pointer to a host virtual N_Port data structure.
1282 *
1283 * This routine checks whether there are more remaining Port Logins
1284 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1285 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1286 * to issue ELS PLOGIs up to the configured discover threads with the
1287  * @vport (@vport->cfg_discovery_threads). The function also decrements
1288  * the @vport's num_disc_nodes by 1 if it is not already 0.
1289 **/
1290 void
1291 lpfc_more_plogi(struct lpfc_vport *vport)
1292 {
1293 int sentplogi;
1294
1295 if (vport->num_disc_nodes)
1296 vport->num_disc_nodes--;
1297
1298 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1299 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1300 "0232 Continue discovery with %d PLOGIs to go "
1301 "Data: x%x x%x x%x\n",
1302 vport->num_disc_nodes, vport->fc_plogi_cnt,
1303 vport->fc_flag, vport->port_state);
1304 /* Check to see if there are more PLOGIs to be sent */
1305 if (vport->fc_flag & FC_NLP_MORE)
1306 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1307 sentplogi = lpfc_els_disc_plogi(vport);
1308
1309 return;
1310 }
1311
1312 /**
1313  * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1314 * @phba: pointer to lpfc hba data structure.
1315 * @prsp: pointer to response IOCB payload.
1316 * @ndlp: pointer to a node-list data structure.
1317 *
1318 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1319  * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1320 * The following cases are considered N_Port confirmed:
1321 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1322 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1323 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1324 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1325 * 1) if there is a node on vport list other than the @ndlp with the same
1326 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1327 * on that node to release the RPI associated with the node; 2) if there is
1328 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1329 * into, a new node shall be allocated (or activated). In either case, the
1330 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1331 * be released and the new_ndlp shall be put on to the vport node list and
1332 * its pointer returned as the confirmed node.
1333 *
1334  * Note that before the @ndlp gets "released", the keepDID from not-matching
1335 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1336 * of the @ndlp. This is because the release of @ndlp is actually to put it
1337 * into an inactive state on the vport node list and the vport node list
1338  * management algorithm does not allow two nodes with the same DID.
1339 *
1340 * Return code
1341 * pointer to the PLOGI N_Port @ndlp
1342 **/
1343 static struct lpfc_nodelist *
1344 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1345 struct lpfc_nodelist *ndlp)
1346 {
1347 struct lpfc_vport *vport = ndlp->vport;
1348 struct lpfc_nodelist *new_ndlp;
1349 struct lpfc_rport_data *rdata;
1350 struct fc_rport *rport;
1351 struct serv_parm *sp;
1352 uint8_t name[sizeof(struct lpfc_name)];
1353 uint32_t rc, keepDID = 0;
1354 int put_node;
1355 int put_rport;
1356 struct lpfc_node_rrqs rrq;
1357
1358 /* Fabric nodes can have the same WWPN so we don't bother searching
1359 * by WWPN. Just return the ndlp that was given to us.
1360 */
1361 if (ndlp->nlp_type & NLP_FABRIC)
1362 return ndlp;
1363
1364 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1365 memset(name, 0, sizeof(struct lpfc_name));
1366
1367 /* Now we find out if the NPort we are logging into, matches the WWPN
1368 * we have for that ndlp. If not, we have some work to do.
1369 */
1370 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1371
1372 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1373 return ndlp;
1374 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
1375
1376 if (!new_ndlp) {
1377 rc = memcmp(&ndlp->nlp_portname, name,
1378 sizeof(struct lpfc_name));
1379 if (!rc)
1380 return ndlp;
1381 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1382 if (!new_ndlp)
1383 return ndlp;
1384 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1385 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1386 rc = memcmp(&ndlp->nlp_portname, name,
1387 sizeof(struct lpfc_name));
1388 if (!rc)
1389 return ndlp;
1390 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1391 NLP_STE_UNUSED_NODE);
1392 if (!new_ndlp)
1393 return ndlp;
1394 keepDID = new_ndlp->nlp_DID;
1395 if (phba->sli_rev == LPFC_SLI_REV4)
1396 memcpy(&rrq.xri_bitmap,
1397 &new_ndlp->active_rrqs.xri_bitmap,
1398 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1399 } else {
1400 keepDID = new_ndlp->nlp_DID;
1401 if (phba->sli_rev == LPFC_SLI_REV4)
1402 memcpy(&rrq.xri_bitmap,
1403 &new_ndlp->active_rrqs.xri_bitmap,
1404 sizeof(new_ndlp->active_rrqs.xri_bitmap));
1405 }
1406
1407 lpfc_unreg_rpi(vport, new_ndlp);
1408 new_ndlp->nlp_DID = ndlp->nlp_DID;
1409 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1410 if (phba->sli_rev == LPFC_SLI_REV4)
1411 memcpy(new_ndlp->active_rrqs.xri_bitmap,
1412 &ndlp->active_rrqs.xri_bitmap,
1413 sizeof(ndlp->active_rrqs.xri_bitmap));
1414
1415 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1416 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1417 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1418
1419 /* Set state will put new_ndlp on to node list if not already done */
1420 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1421
1422 /* Move this back to NPR state */
1423 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1424 /* The new_ndlp is replacing ndlp totally, so we need
1425 * to put ndlp on UNUSED list and try to free it.
1426 */
1427
1428 /* Fix up the rport accordingly */
1429 rport = ndlp->rport;
1430 if (rport) {
1431 rdata = rport->dd_data;
1432 if (rdata->pnode == ndlp) {
1433 lpfc_nlp_put(ndlp);
1434 ndlp->rport = NULL;
1435 rdata->pnode = lpfc_nlp_get(new_ndlp);
1436 new_ndlp->rport = rport;
1437 }
1438 new_ndlp->nlp_type = ndlp->nlp_type;
1439 }
1440 /* We shall actually free the ndlp with both nlp_DID and
1441 		 * nlp_portname fields equal to 0 to avoid leaving an ndlp on the
1442 		 * nodelist that can never be used.
1443 */
1444 if (ndlp->nlp_DID == 0) {
1445 spin_lock_irq(&phba->ndlp_lock);
1446 NLP_SET_FREE_REQ(ndlp);
1447 spin_unlock_irq(&phba->ndlp_lock);
1448 }
1449
1450 /* Two ndlps cannot have the same did on the nodelist */
1451 ndlp->nlp_DID = keepDID;
1452 if (phba->sli_rev == LPFC_SLI_REV4)
1453 memcpy(&ndlp->active_rrqs.xri_bitmap,
1454 &rrq.xri_bitmap,
1455 sizeof(ndlp->active_rrqs.xri_bitmap));
1456 lpfc_drop_node(vport, ndlp);
1457 }
1458 else {
1459 lpfc_unreg_rpi(vport, ndlp);
1460 /* Two ndlps cannot have the same did */
1461 ndlp->nlp_DID = keepDID;
1462 if (phba->sli_rev == LPFC_SLI_REV4)
1463 memcpy(&ndlp->active_rrqs.xri_bitmap,
1464 &rrq.xri_bitmap,
1465 sizeof(ndlp->active_rrqs.xri_bitmap));
1466 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1467 /* Since we are swapping the ndlp passed in with the new one
1468 * and the did has already been swapped, copy over the
1469 * state and names.
1470 */
1471 memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
1472 sizeof(struct lpfc_name));
1473 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1474 sizeof(struct lpfc_name));
1475 new_ndlp->nlp_state = ndlp->nlp_state;
1476 /* Fix up the rport accordingly */
1477 rport = ndlp->rport;
1478 if (rport) {
1479 rdata = rport->dd_data;
1480 put_node = rdata->pnode != NULL;
1481 put_rport = ndlp->rport != NULL;
1482 rdata->pnode = NULL;
1483 ndlp->rport = NULL;
1484 if (put_node)
1485 lpfc_nlp_put(ndlp);
1486 if (put_rport)
1487 put_device(&rport->dev);
1488 }
1489 }
1490 return new_ndlp;
1491 }
1492
1493 /**
1494 * lpfc_end_rscn - Check and handle more rscn for a vport
1495 * @vport: pointer to a host virtual N_Port data structure.
1496 *
1497 * This routine checks whether more Registration State Change
1498 * Notifications (RSCNs) came in while the discovery state machine was in
1499 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1500 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1501 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1502 * handling the RSCNs.
1503 **/
1504 void
1505 lpfc_end_rscn(struct lpfc_vport *vport)
1506 {
1507 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1508
1509 if (vport->fc_flag & FC_RSCN_MODE) {
1510 /*
1511 * Check to see if more RSCNs came in while we were
1512 * processing this one.
1513 */
1514 if (vport->fc_rscn_id_cnt ||
1515 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1516 lpfc_els_handle_rscn(vport);
1517 else {
1518 spin_lock_irq(shost->host_lock);
1519 vport->fc_flag &= ~FC_RSCN_MODE;
1520 spin_unlock_irq(shost->host_lock);
1521 }
1522 }
1523 }
1524
1525 /**
1526  * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
1527 * @phba: pointer to lpfc hba data structure.
1528 * @cmdiocb: pointer to lpfc command iocb data structure.
1529 * @rspiocb: pointer to lpfc response iocb data structure.
1530 *
1531 * This routine will call the clear rrq function to free the rrq and
1532 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1533 * exist then the clear_rrq is still called because the rrq needs to
1534 * be freed.
1535 **/
1536
1537 static void
1538 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1539 struct lpfc_iocbq *rspiocb)
1540 {
1541 struct lpfc_vport *vport = cmdiocb->vport;
1542 IOCB_t *irsp;
1543 struct lpfc_nodelist *ndlp;
1544 struct lpfc_node_rrq *rrq;
1545
1546 /* we pass cmdiocb to state machine which needs rspiocb as well */
1547 rrq = cmdiocb->context_un.rrq;
1548 cmdiocb->context_un.rsp_iocb = rspiocb;
1549
1550 irsp = &rspiocb->iocb;
1551 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1552 "RRQ cmpl: status:x%x/x%x did:x%x",
1553 irsp->ulpStatus, irsp->un.ulpWord[4],
1554 irsp->un.elsreq64.remoteID);
1555
1556 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1557 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1558 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1559 "2882 RRQ completes to NPort x%x "
1560 "with no ndlp. Data: x%x x%x x%x\n",
1561 irsp->un.elsreq64.remoteID,
1562 irsp->ulpStatus, irsp->un.ulpWord[4],
1563 irsp->ulpIoTag);
1564 goto out;
1565 }
1566
1567 /* rrq completes to NPort <nlp_DID> */
1568 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1569 "2880 RRQ completes to NPort x%x "
1570 "Data: x%x x%x x%x x%x x%x\n",
1571 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1572 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1573
1574 if (irsp->ulpStatus) {
1575 /* Check for retry */
1576 /* RRQ failed Don't print the vport to vport rjts */
1577 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1578 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1579 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1580 (phba)->pport->cfg_log_verbose & LOG_ELS)
1581 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1582 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1583 ndlp->nlp_DID, irsp->ulpStatus,
1584 irsp->un.ulpWord[4]);
1585 }
1586 out:
1587 if (rrq)
1588 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1589 lpfc_els_free_iocb(phba, cmdiocb);
1590 return;
1591 }
1592 /**
1593 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1594 * @phba: pointer to lpfc hba data structure.
1595 * @cmdiocb: pointer to lpfc command iocb data structure.
1596 * @rspiocb: pointer to lpfc response iocb data structure.
1597 *
1598 * This routine is the completion callback function for issuing the Port
1599 * Login (PLOGI) command. For PLOGI completion, there must be an active
1600 * ndlp on the vport node list that matches the remote node ID from the
1601  * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
1602  * ignored and command IOCB released. The PLOGI response IOCB status is
1603  * checked for error conditions. If an error status is reported, PLOGI
1604 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1605 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1606 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1607 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1608 * there are additional N_Port nodes with the vport that need to perform
1609 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1610 * PLOGIs.
1611 **/
1612 static void
1613 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1614 struct lpfc_iocbq *rspiocb)
1615 {
1616 struct lpfc_vport *vport = cmdiocb->vport;
1617 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1618 IOCB_t *irsp;
1619 struct lpfc_nodelist *ndlp;
1620 struct lpfc_dmabuf *prsp;
1621 int disc, rc, did, type;
1622
1623 /* we pass cmdiocb to state machine which needs rspiocb as well */
1624 cmdiocb->context_un.rsp_iocb = rspiocb;
1625
1626 irsp = &rspiocb->iocb;
1627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1628 "PLOGI cmpl: status:x%x/x%x did:x%x",
1629 irsp->ulpStatus, irsp->un.ulpWord[4],
1630 irsp->un.elsreq64.remoteID);
1631
1632 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1633 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1634 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1635 "0136 PLOGI completes to NPort x%x "
1636 "with no ndlp. Data: x%x x%x x%x\n",
1637 irsp->un.elsreq64.remoteID,
1638 irsp->ulpStatus, irsp->un.ulpWord[4],
1639 irsp->ulpIoTag);
1640 goto out;
1641 }
1642
1643 /* Since ndlp can be freed in the disc state machine, note if this node
1644 * is being used during discovery.
1645 */
1646 spin_lock_irq(shost->host_lock);
1647 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1648 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1649 spin_unlock_irq(shost->host_lock);
1650 rc = 0;
1651
1652 /* PLOGI completes to NPort <nlp_DID> */
1653 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1654 "0102 PLOGI completes to NPort x%x "
1655 "Data: x%x x%x x%x x%x x%x\n",
1656 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1657 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1658 /* Check to see if link went down during discovery */
1659 if (lpfc_els_chk_latt(vport)) {
1660 spin_lock_irq(shost->host_lock);
1661 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1662 spin_unlock_irq(shost->host_lock);
1663 goto out;
1664 }
1665
1666 /* ndlp could be freed in DSM, save these values now */
1667 type = ndlp->nlp_type;
1668 did = ndlp->nlp_DID;
1669
1670 if (irsp->ulpStatus) {
1671 /* Check for retry */
1672 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1673 /* ELS command is being retried */
1674 if (disc) {
1675 spin_lock_irq(shost->host_lock);
1676 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1677 spin_unlock_irq(shost->host_lock);
1678 }
1679 goto out;
1680 }
1681 /* PLOGI failed; don't print the vport-to-vport rejects */
1682 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1683 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1684 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1685 (phba)->pport->cfg_log_verbose & LOG_ELS)
1686 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1687 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1688 ndlp->nlp_DID, irsp->ulpStatus,
1689 irsp->un.ulpWord[4]);
1690 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1691 if (lpfc_error_lost_link(irsp))
1692 rc = NLP_STE_FREED_NODE;
1693 else
1694 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1695 NLP_EVT_CMPL_PLOGI);
1696 } else {
1697 /* Good status, call state machine */
1698 prsp = list_entry(((struct lpfc_dmabuf *)
1699 cmdiocb->context2)->list.next,
1700 struct lpfc_dmabuf, list);
1701 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1702 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1703 NLP_EVT_CMPL_PLOGI);
1704 }
1705
1706 if (disc && vport->num_disc_nodes) {
1707 /* Check to see if there are more PLOGIs to be sent */
1708 lpfc_more_plogi(vport);
1709
1710 if (vport->num_disc_nodes == 0) {
1711 spin_lock_irq(shost->host_lock);
1712 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1713 spin_unlock_irq(shost->host_lock);
1714
1715 lpfc_can_disctmo(vport);
1716 lpfc_end_rscn(vport);
1717 }
1718 }
1719
1720 out:
1721 lpfc_els_free_iocb(phba, cmdiocb);
1722 return;
1723 }
1724
1725 /**
1726 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1727 * @vport: pointer to a host virtual N_Port data structure.
1728 * @did: destination port identifier.
1729 * @retry: number of retries to the command IOCB.
1730 *
1731 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1732 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1733 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1734 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1735 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1736 *
1737 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1738 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1739 * will be stored into the context1 field of the IOCB for the completion
1740 * callback function to the PLOGI ELS command.
1741 *
1742 * Return code
1743 * 0 - Successfully issued a plogi for @vport
1744 * 1 - failed to issue a plogi for @vport
1745 **/
1746 int
1747 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1748 {
1749 struct lpfc_hba *phba = vport->phba;
1750 struct serv_parm *sp;
1751 IOCB_t *icmd;
1752 struct lpfc_nodelist *ndlp;
1753 struct lpfc_iocbq *elsiocb;
1754 struct lpfc_sli *psli;
1755 uint8_t *pcmd;
1756 uint16_t cmdsize;
1757 int ret;
1758
1759 psli = &phba->sli;
1760
1761 ndlp = lpfc_findnode_did(vport, did);
1762 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1763 ndlp = NULL;
1764
1765 /* If ndlp is not NULL, we will bump the reference count on it */
1766 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1767 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1768 ELS_CMD_PLOGI);
1769 if (!elsiocb)
1770 return 1;
1771
1772 icmd = &elsiocb->iocb;
1773 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1774
1775 /* For PLOGI request, remainder of payload is service parameters */
1776 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1777 pcmd += sizeof(uint32_t);
1778 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1779 sp = (struct serv_parm *) pcmd;
1780
1781 /*
1782 * If we are an N_Port connected to a Fabric, fix up parameters so logins
1783 * to devices on remote loops work.
1784 */
1785 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1786 sp->cmn.altBbCredit = 1;
1787
1788 if (sp->cmn.fcphLow < FC_PH_4_3)
1789 sp->cmn.fcphLow = FC_PH_4_3;
1790
1791 if (sp->cmn.fcphHigh < FC_PH3)
1792 sp->cmn.fcphHigh = FC_PH3;
1793
1794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1795 "Issue PLOGI: did:x%x",
1796 did, 0, 0);
1797
1798 phba->fc_stat.elsXmitPLOGI++;
1799 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1800 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1801
1802 if (ret == IOCB_ERROR) {
1803 lpfc_els_free_iocb(phba, elsiocb);
1804 return 1;
1805 }
1806 return 0;
1807 }
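
/*
 * A minimal usage sketch for lpfc_issue_els_plogi(), assuming a discovery
 * context where @vport is valid and @did identifies the remote N_Port.
 * The state transition shown mirrors the ELS_CMD_PLOGI handling in
 * lpfc_els_retry_delay_handler() later in this file; any additional error
 * handling is omitted.
 *
 *	struct lpfc_nodelist *ndlp = lpfc_findnode_did(vport, did);
 *
 *	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 *	    !lpfc_issue_els_plogi(vport, did, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *	}
 */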
1808
1809 /**
1810 * lpfc_cmpl_els_prli - Completion callback function for prli
1811 * @phba: pointer to lpfc hba data structure.
1812 * @cmdiocb: pointer to lpfc command iocb data structure.
1813 * @rspiocb: pointer to lpfc response iocb data structure.
1814 *
1815 * This routine is the completion callback function for a Process Login
1816 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1817 * status. If there is error status reported, PRLI retry shall be attempted
1818 * by invoking the lpfc_els_retry() routine. Otherwise, the state
1819 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
1820 * ndlp to mark the PRLI completion.
1821 **/
1822 static void
1823 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1824 struct lpfc_iocbq *rspiocb)
1825 {
1826 struct lpfc_vport *vport = cmdiocb->vport;
1827 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1828 IOCB_t *irsp;
1829 struct lpfc_sli *psli;
1830 struct lpfc_nodelist *ndlp;
1831
1832 psli = &phba->sli;
1833 /* we pass cmdiocb to state machine which needs rspiocb as well */
1834 cmdiocb->context_un.rsp_iocb = rspiocb;
1835
1836 irsp = &(rspiocb->iocb);
1837 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1838 spin_lock_irq(shost->host_lock);
1839 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1840 spin_unlock_irq(shost->host_lock);
1841
1842 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1843 "PRLI cmpl: status:x%x/x%x did:x%x",
1844 irsp->ulpStatus, irsp->un.ulpWord[4],
1845 ndlp->nlp_DID);
1846 /* PRLI completes to NPort <nlp_DID> */
1847 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1848 "0103 PRLI completes to NPort x%x "
1849 "Data: x%x x%x x%x x%x\n",
1850 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1851 irsp->ulpTimeout, vport->num_disc_nodes);
1852
1853 vport->fc_prli_sent--;
1854 /* Check to see if link went down during discovery */
1855 if (lpfc_els_chk_latt(vport))
1856 goto out;
1857
1858 if (irsp->ulpStatus) {
1859 /* Check for retry */
1860 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1861 /* ELS command is being retried */
1862 goto out;
1863 }
1864 /* PRLI failed */
1865 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1866 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1867 ndlp->nlp_DID, irsp->ulpStatus,
1868 irsp->un.ulpWord[4]);
1869 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1870 if (lpfc_error_lost_link(irsp))
1871 goto out;
1872 else
1873 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1874 NLP_EVT_CMPL_PRLI);
1875 } else
1876 /* Good status, call state machine */
1877 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1878 NLP_EVT_CMPL_PRLI);
1879 out:
1880 lpfc_els_free_iocb(phba, cmdiocb);
1881 return;
1882 }
1883
1884 /**
1885 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
1886 * @vport: pointer to a host virtual N_Port data structure.
1887 * @ndlp: pointer to a node-list data structure.
1888 * @retry: number of retries to the command IOCB.
1889 *
1890 * This routine issues a Process Login (PRLI) ELS command for the
1891 * @vport. The PRLI service parameters are set up in the payload of the
1892 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1893 * is put to the IOCB completion callback func field before invoking the
1894 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1895 *
1896 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1897 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1898 * will be stored into the context1 field of the IOCB for the completion
1899 * callback function to the PRLI ELS command.
1900 *
1901 * Return code
1902 * 0 - successfully issued prli iocb command for @vport
1903 * 1 - failed to issue prli iocb command for @vport
1904 **/
1905 int
1906 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1907 uint8_t retry)
1908 {
1909 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1910 struct lpfc_hba *phba = vport->phba;
1911 PRLI *npr;
1912 IOCB_t *icmd;
1913 struct lpfc_iocbq *elsiocb;
1914 uint8_t *pcmd;
1915 uint16_t cmdsize;
1916
1917 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1918 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1919 ndlp->nlp_DID, ELS_CMD_PRLI);
1920 if (!elsiocb)
1921 return 1;
1922
1923 icmd = &elsiocb->iocb;
1924 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1925
1926 /* For PRLI request, remainder of payload is service parameters */
1927 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1928 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1929 pcmd += sizeof(uint32_t);
1930
1931 /* For PRLI, remainder of payload is PRLI parameter page */
1932 npr = (PRLI *) pcmd;
1933 /*
1934 * If our firmware version is 3.20 or later,
1935 * set the following bits for FC-TAPE support.
1936 */
1937 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1938 npr->ConfmComplAllowed = 1;
1939 npr->Retry = 1;
1940 npr->TaskRetryIdReq = 1;
1941 }
1942 npr->estabImagePair = 1;
1943 npr->readXferRdyDis = 1;
1944
1945 /* For FCP support */
1946 npr->prliType = PRLI_FCP_TYPE;
1947 npr->initiatorFunc = 1;
1948
1949 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1950 "Issue PRLI: did:x%x",
1951 ndlp->nlp_DID, 0, 0);
1952
1953 phba->fc_stat.elsXmitPRLI++;
1954 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1955 spin_lock_irq(shost->host_lock);
1956 ndlp->nlp_flag |= NLP_PRLI_SND;
1957 spin_unlock_irq(shost->host_lock);
1958 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1959 IOCB_ERROR) {
1960 spin_lock_irq(shost->host_lock);
1961 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1962 spin_unlock_irq(shost->host_lock);
1963 lpfc_els_free_iocb(phba, elsiocb);
1964 return 1;
1965 }
1966 vport->fc_prli_sent++;
1967 return 0;
1968 }
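
/*
 * A minimal usage sketch for lpfc_issue_els_prli(), assuming @ndlp has
 * already completed PLOGI with the remote port.  The transition into
 * NLP_STE_PRLI_ISSUE mirrors the ELS_CMD_PRLI handling in
 * lpfc_els_retry_delay_handler() later in this file.
 *
 *	if (!lpfc_issue_els_prli(vport, ndlp, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 *	}
 */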
1969
1970 /**
1971 * lpfc_rscn_disc - Perform rscn discovery for a vport
1972 * @vport: pointer to a host virtual N_Port data structure.
1973 *
1974 * This routine performs Registration State Change Notification (RSCN)
1975 * discovery for a @vport. If the @vport's node port recovery count is not
1976 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
1977 * the nodes that need recovery. If none of the PLOGI were needed through
1978 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1979 * invoked to check and handle possible more RSCN came in during the period
1980 * of processing the current ones.
1981 **/
1982 static void
1983 lpfc_rscn_disc(struct lpfc_vport *vport)
1984 {
1985 lpfc_can_disctmo(vport);
1986
1987 /* RSCN discovery */
1988 /* go thru NPR nodes and issue ELS PLOGIs */
1989 if (vport->fc_npr_cnt)
1990 if (lpfc_els_disc_plogi(vport))
1991 return;
1992
1993 lpfc_end_rscn(vport);
1994 }
1995
1996 /**
1997 * lpfc_adisc_done - Complete the adisc phase of discovery
1998 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1999 *
2000 * This function is called when the final ADISC is completed during discovery.
2001 * This function handles clearing link attention or issuing reg_vpi depending
2002 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2003 * discovery.
2004 * This function is called with no locks held.
2005 **/
2006 static void
2007 lpfc_adisc_done(struct lpfc_vport *vport)
2008 {
2009 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2010 struct lpfc_hba *phba = vport->phba;
2011
2012 /*
2013 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2014 * and continue discovery.
2015 */
2016 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2017 !(vport->fc_flag & FC_RSCN_MODE) &&
2018 (phba->sli_rev < LPFC_SLI_REV4)) {
2019 lpfc_issue_reg_vpi(phba, vport);
2020 return;
2021 }
2022 /*
2023 * For SLI2, we need to set port_state to READY
2024 * and continue discovery.
2025 */
2026 if (vport->port_state < LPFC_VPORT_READY) {
2027 /* If we get here, there is nothing to ADISC */
2028 if (vport->port_type == LPFC_PHYSICAL_PORT)
2029 lpfc_issue_clear_la(phba, vport);
2030 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2031 vport->num_disc_nodes = 0;
2032 /* go thru NPR list, issue ELS PLOGIs */
2033 if (vport->fc_npr_cnt)
2034 lpfc_els_disc_plogi(vport);
2035 if (!vport->num_disc_nodes) {
2036 spin_lock_irq(shost->host_lock);
2037 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2038 spin_unlock_irq(shost->host_lock);
2039 lpfc_can_disctmo(vport);
2040 lpfc_end_rscn(vport);
2041 }
2042 }
2043 vport->port_state = LPFC_VPORT_READY;
2044 } else
2045 lpfc_rscn_disc(vport);
2046 }
2047
2048 /**
2049 * lpfc_more_adisc - Issue more adisc as needed
2050 * @vport: pointer to a host virtual N_Port data structure.
2051 *
2052 * This routine determines whether there are more ndlps on a @vport's
2053 * node list that need to have Address Discover (ADISC) issued. If so, it will
2054 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2055 * remaining nodes which need to have ADISC sent.
2056 **/
2057 void
2058 lpfc_more_adisc(struct lpfc_vport *vport)
2059 {
2060 int sentadisc;
2061
2062 if (vport->num_disc_nodes)
2063 vport->num_disc_nodes--;
2064 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2065 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2066 "0210 Continue discovery with %d ADISCs to go "
2067 "Data: x%x x%x x%x\n",
2068 vport->num_disc_nodes, vport->fc_adisc_cnt,
2069 vport->fc_flag, vport->port_state);
2070 /* Check to see if there are more ADISCs to be sent */
2071 if (vport->fc_flag & FC_NLP_MORE) {
2072 lpfc_set_disctmo(vport);
2073 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2074 sentadisc = lpfc_els_disc_adisc(vport);
2075 }
2076 if (!vport->num_disc_nodes)
2077 lpfc_adisc_done(vport);
2078 return;
2079 }
2080
2081 /**
2082 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2083 * @phba: pointer to lpfc hba data structure.
2084 * @cmdiocb: pointer to lpfc command iocb data structure.
2085 * @rspiocb: pointer to lpfc response iocb data structure.
2086 *
2087 * This routine is the completion function for issuing the Address Discover
2088 * (ADISC) command. It first checks to see whether the link went down during
2089 * the discovery process. If so, the node is marked for node port recovery
2090 * so that the link attention handler will reissue the discovery IOCB, and
2091 * the routine exits. Otherwise, the response status is checked. If an error
2092 * was reported in the response status, the ADISC command shall be retried by
2093 * invoking the lpfc_els_retry() routine. Otherwise, if no error was reported
2094 * in the response status, the state machine is invoked to transition the
2095 * node with respect to the NLP_EVT_CMPL_ADISC event.
2096 **/
2097 static void
2098 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2099 struct lpfc_iocbq *rspiocb)
2100 {
2101 struct lpfc_vport *vport = cmdiocb->vport;
2102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2103 IOCB_t *irsp;
2104 struct lpfc_nodelist *ndlp;
2105 int disc;
2106
2107 /* we pass cmdiocb to state machine which needs rspiocb as well */
2108 cmdiocb->context_un.rsp_iocb = rspiocb;
2109
2110 irsp = &(rspiocb->iocb);
2111 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2112
2113 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2114 "ADISC cmpl: status:x%x/x%x did:x%x",
2115 irsp->ulpStatus, irsp->un.ulpWord[4],
2116 ndlp->nlp_DID);
2117
2118 /* Since ndlp can be freed in the disc state machine, note if this node
2119 * is being used during discovery.
2120 */
2121 spin_lock_irq(shost->host_lock);
2122 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2123 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2124 spin_unlock_irq(shost->host_lock);
2125 /* ADISC completes to NPort <nlp_DID> */
2126 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2127 "0104 ADISC completes to NPort x%x "
2128 "Data: x%x x%x x%x x%x x%x\n",
2129 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2130 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2131 /* Check to see if link went down during discovery */
2132 if (lpfc_els_chk_latt(vport)) {
2133 spin_lock_irq(shost->host_lock);
2134 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2135 spin_unlock_irq(shost->host_lock);
2136 goto out;
2137 }
2138
2139 if (irsp->ulpStatus) {
2140 /* Check for retry */
2141 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2142 /* ELS command is being retried */
2143 if (disc) {
2144 spin_lock_irq(shost->host_lock);
2145 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2146 spin_unlock_irq(shost->host_lock);
2147 lpfc_set_disctmo(vport);
2148 }
2149 goto out;
2150 }
2151 /* ADISC failed */
2152 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2153 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2154 ndlp->nlp_DID, irsp->ulpStatus,
2155 irsp->un.ulpWord[4]);
2156 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2157 if (!lpfc_error_lost_link(irsp))
2158 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2159 NLP_EVT_CMPL_ADISC);
2160 } else
2161 /* Good status, call state machine */
2162 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2163 NLP_EVT_CMPL_ADISC);
2164
2165 /* Check to see if there are more ADISCs to be sent */
2166 if (disc && vport->num_disc_nodes)
2167 lpfc_more_adisc(vport);
2168 out:
2169 lpfc_els_free_iocb(phba, cmdiocb);
2170 return;
2171 }
2172
2173 /**
2174 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2175 * @vport: pointer to a virtual N_Port data structure.
2176 * @ndlp: pointer to a node-list data structure.
2177 * @retry: number of retries to the command IOCB.
2178 *
2179 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2180 * @vport. It prepares the payload of the ADISC ELS command, updates the
2181 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2182 * to issue the ADISC ELS command.
2183 *
2184 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2185 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2186 * will be stored into the context1 field of the IOCB for the completion
2187 * callback function to the ADISC ELS command.
2188 *
2189 * Return code
2190 * 0 - successfully issued adisc
2191 * 1 - failed to issue adisc
2192 **/
2193 int
2194 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2195 uint8_t retry)
2196 {
2197 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2198 struct lpfc_hba *phba = vport->phba;
2199 ADISC *ap;
2200 IOCB_t *icmd;
2201 struct lpfc_iocbq *elsiocb;
2202 uint8_t *pcmd;
2203 uint16_t cmdsize;
2204
2205 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2206 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2207 ndlp->nlp_DID, ELS_CMD_ADISC);
2208 if (!elsiocb)
2209 return 1;
2210
2211 icmd = &elsiocb->iocb;
2212 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2213
2214 /* For ADISC request, remainder of payload is service parameters */
2215 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2216 pcmd += sizeof(uint32_t);
2217
2218 /* Fill in ADISC payload */
2219 ap = (ADISC *) pcmd;
2220 ap->hardAL_PA = phba->fc_pref_ALPA;
2221 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2222 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2223 ap->DID = be32_to_cpu(vport->fc_myDID);
2224
2225 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2226 "Issue ADISC: did:x%x",
2227 ndlp->nlp_DID, 0, 0);
2228
2229 phba->fc_stat.elsXmitADISC++;
2230 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2231 spin_lock_irq(shost->host_lock);
2232 ndlp->nlp_flag |= NLP_ADISC_SND;
2233 spin_unlock_irq(shost->host_lock);
2234 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2235 IOCB_ERROR) {
2236 spin_lock_irq(shost->host_lock);
2237 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2238 spin_unlock_irq(shost->host_lock);
2239 lpfc_els_free_iocb(phba, elsiocb);
2240 return 1;
2241 }
2242 return 0;
2243 }
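
/*
 * A minimal usage sketch for lpfc_issue_els_adisc(), assuming @ndlp is an
 * NPR node being revalidated after a link event.  The transition into
 * NLP_STE_ADISC_ISSUE mirrors the ELS_CMD_ADISC handling in
 * lpfc_els_retry_delay_handler() later in this file.
 *
 *	if (!lpfc_issue_els_adisc(vport, ndlp, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *	}
 */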
2244
2245 /**
2246 * lpfc_cmpl_els_logo - Completion callback function for logo
2247 * @phba: pointer to lpfc hba data structure.
2248 * @cmdiocb: pointer to lpfc command iocb data structure.
2249 * @rspiocb: pointer to lpfc response iocb data structure.
2250 *
2251 * This routine is the completion function for issuing the ELS Logout (LOGO)
2252 * command. If no error status was reported from the LOGO response, the
2253 * state machine of the associated ndlp shall be invoked for transition with
2254 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2255 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2256 **/
2257 static void
2258 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2259 struct lpfc_iocbq *rspiocb)
2260 {
2261 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2262 struct lpfc_vport *vport = ndlp->vport;
2263 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2264 IOCB_t *irsp;
2265 struct lpfc_sli *psli;
2266 struct lpfcMboxq *mbox;
2267
2268 psli = &phba->sli;
2269 /* we pass cmdiocb to state machine which needs rspiocb as well */
2270 cmdiocb->context_un.rsp_iocb = rspiocb;
2271
2272 irsp = &(rspiocb->iocb);
2273 spin_lock_irq(shost->host_lock);
2274 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2275 spin_unlock_irq(shost->host_lock);
2276
2277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2278 "LOGO cmpl: status:x%x/x%x did:x%x",
2279 irsp->ulpStatus, irsp->un.ulpWord[4],
2280 ndlp->nlp_DID);
2281 /* LOGO completes to NPort <nlp_DID> */
2282 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2283 "0105 LOGO completes to NPort x%x "
2284 "Data: x%x x%x x%x x%x\n",
2285 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2286 irsp->ulpTimeout, vport->num_disc_nodes);
2287 /* Check to see if link went down during discovery */
2288 if (lpfc_els_chk_latt(vport))
2289 goto out;
2290
2291 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2292 /* NLP_EVT_DEVICE_RM should unregister the RPI
2293 * which should abort all outstanding IOs.
2294 */
2295 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2296 NLP_EVT_DEVICE_RM);
2297 goto out;
2298 }
2299
2300 if (irsp->ulpStatus) {
2301 /* Check for retry */
2302 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
2303 /* ELS command is being retried */
2304 goto out;
2305 /* LOGO failed */
2306 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2307 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2308 ndlp->nlp_DID, irsp->ulpStatus,
2309 irsp->un.ulpWord[4]);
2310 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2311 if (lpfc_error_lost_link(irsp))
2312 goto out;
2313 else
2314 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2315 NLP_EVT_CMPL_LOGO);
2316 } else
2317 /* Good status, call state machine.
2318 * This will unregister the rpi if needed.
2319 */
2320 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2321 NLP_EVT_CMPL_LOGO);
2322 out:
2323 lpfc_els_free_iocb(phba, cmdiocb);
2324 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2325 if ((vport->fc_flag & FC_PT2PT) &&
2326 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2327 phba->pport->fc_myDID = 0;
2328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2329 if (mbox) {
2330 lpfc_config_link(phba, mbox);
2331 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2332 mbox->vport = vport;
2333 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2334 MBX_NOT_FINISHED) {
2335 mempool_free(mbox, phba->mbox_mem_pool);
2336 }
2337 }
2338 }
2339 return;
2340 }
2341
2342 /**
2343 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2344 * @vport: pointer to a virtual N_Port data structure.
2345 * @ndlp: pointer to a node-list data structure.
2346 * @retry: number of retries to the command IOCB.
2347 *
2348 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2349 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2350 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2351 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2352 *
2353 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2354 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2355 * will be stored into the context1 field of the IOCB for the completion
2356 * callback function to the LOGO ELS command.
2357 *
2358 * Return code
2359 * 0 - successfully issued logo
2360 * 1 - failed to issue logo
2361 **/
2362 int
2363 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2364 uint8_t retry)
2365 {
2366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2367 struct lpfc_hba *phba = vport->phba;
2368 IOCB_t *icmd;
2369 struct lpfc_iocbq *elsiocb;
2370 uint8_t *pcmd;
2371 uint16_t cmdsize;
2372 int rc;
2373
2374 spin_lock_irq(shost->host_lock);
2375 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2376 spin_unlock_irq(shost->host_lock);
2377 return 0;
2378 }
2379 spin_unlock_irq(shost->host_lock);
2380
2381 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2382 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2383 ndlp->nlp_DID, ELS_CMD_LOGO);
2384 if (!elsiocb)
2385 return 1;
2386
2387 icmd = &elsiocb->iocb;
2388 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2389 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2390 pcmd += sizeof(uint32_t);
2391
2392 /* Fill in LOGO payload */
2393 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2394 pcmd += sizeof(uint32_t);
2395 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2396
2397 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2398 "Issue LOGO: did:x%x",
2399 ndlp->nlp_DID, 0, 0);
2400
2401 phba->fc_stat.elsXmitLOGO++;
2402 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2403 spin_lock_irq(shost->host_lock);
2404 ndlp->nlp_flag |= NLP_LOGO_SND;
2405 spin_unlock_irq(shost->host_lock);
2406 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2407
2408 if (rc == IOCB_ERROR) {
2409 spin_lock_irq(shost->host_lock);
2410 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2411 spin_unlock_irq(shost->host_lock);
2412 lpfc_els_free_iocb(phba, elsiocb);
2413 return 1;
2414 }
2415 return 0;
2416 }
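
/*
 * A minimal usage sketch for lpfc_issue_els_logo(), assuming the caller is
 * tearing down the login with @ndlp.  Moving the node to NLP_STE_NPR_NODE
 * mirrors the ELS_CMD_LOGO handling in lpfc_els_retry_delay_handler()
 * later in this file.
 *
 *	if (!lpfc_issue_els_logo(vport, ndlp, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *	}
 */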
2417
2418 /**
2419 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2420 * @phba: pointer to lpfc hba data structure.
2421 * @cmdiocb: pointer to lpfc command iocb data structure.
2422 * @rspiocb: pointer to lpfc response iocb data structure.
2423 *
2424 * This routine is a generic completion callback function for ELS commands.
2425 * Specifically, it is the callback function which does not need to perform
2426 * any command specific operations. It is currently used by the ELS command
2427 * issuing routines for the ELS State Change Request (SCR),
2428 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2429 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2430 * certain debug loggings, this callback function simply invokes the
2431 * lpfc_els_chk_latt() routine to check whether link went down during the
2432 * discovery process.
2433 **/
2434 static void
2435 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2436 struct lpfc_iocbq *rspiocb)
2437 {
2438 struct lpfc_vport *vport = cmdiocb->vport;
2439 IOCB_t *irsp;
2440
2441 irsp = &rspiocb->iocb;
2442
2443 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2444 "ELS cmd cmpl: status:x%x/x%x did:x%x",
2445 irsp->ulpStatus, irsp->un.ulpWord[4],
2446 irsp->un.elsreq64.remoteID);
2447 /* ELS cmd tag <ulpIoTag> completes */
2448 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2449 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2450 irsp->ulpIoTag, irsp->ulpStatus,
2451 irsp->un.ulpWord[4], irsp->ulpTimeout);
2452 /* Check to see if link went down during discovery */
2453 lpfc_els_chk_latt(vport);
2454 lpfc_els_free_iocb(phba, cmdiocb);
2455 return;
2456 }
2457
2458 /**
2459 * lpfc_issue_els_scr - Issue a scr to a node on a vport
2460 * @vport: pointer to a host virtual N_Port data structure.
2461 * @nportid: N_Port identifier to the remote node.
2462 * @retry: number of retries to the command IOCB.
2463 *
2464 * This routine issues a State Change Request (SCR) to a fabric node
2465 * on a @vport. The remote node @nportid is passed into the function. It
2466 * first search the @vport node list to find the matching ndlp. If no such
2467 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2468 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2469 * routine is invoked to send the SCR IOCB.
2470 *
2471 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2472 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2473 * will be stored into the context1 field of the IOCB for the completion
2474 * callback function to the SCR ELS command.
2475 *
2476 * Return code
2477 * 0 - Successfully issued scr command
2478 * 1 - Failed to issue scr command
2479 **/
2480 int
2481 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2482 {
2483 struct lpfc_hba *phba = vport->phba;
2484 IOCB_t *icmd;
2485 struct lpfc_iocbq *elsiocb;
2486 struct lpfc_sli *psli;
2487 uint8_t *pcmd;
2488 uint16_t cmdsize;
2489 struct lpfc_nodelist *ndlp;
2490
2491 psli = &phba->sli;
2492 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2493
2494 ndlp = lpfc_findnode_did(vport, nportid);
2495 if (!ndlp) {
2496 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2497 if (!ndlp)
2498 return 1;
2499 lpfc_nlp_init(vport, ndlp, nportid);
2500 lpfc_enqueue_node(vport, ndlp);
2501 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2502 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2503 if (!ndlp)
2504 return 1;
2505 }
2506
2507 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2508 ndlp->nlp_DID, ELS_CMD_SCR);
2509
2510 if (!elsiocb) {
2511 /* This will trigger the release of the node just
2512 * allocated
2513 */
2514 lpfc_nlp_put(ndlp);
2515 return 1;
2516 }
2517
2518 icmd = &elsiocb->iocb;
2519 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2520
2521 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2522 pcmd += sizeof(uint32_t);
2523
2524 /* For SCR, remainder of payload is SCR parameter page */
2525 memset(pcmd, 0, sizeof(SCR));
2526 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2527
2528 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2529 "Issue SCR: did:x%x",
2530 ndlp->nlp_DID, 0, 0);
2531
2532 phba->fc_stat.elsXmitSCR++;
2533 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2534 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2535 IOCB_ERROR) {
2536 /* The additional lpfc_nlp_put will cause the following
2537 * lpfc_els_free_iocb routine to trigger the release of
2538 * the node.
2539 */
2540 lpfc_nlp_put(ndlp);
2541 lpfc_els_free_iocb(phba, elsiocb);
2542 return 1;
2543 }
2544 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2545 * trigger the release of node.
2546 */
2547 lpfc_nlp_put(ndlp);
2548 return 0;
2549 }
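
/*
 * A minimal usage sketch for lpfc_issue_els_scr().  An SCR is normally sent
 * to the Fabric Controller well-known address 0xFFFFFD once fabric login
 * completes, so that the port receives RSCN events; SCR_DID is assumed here
 * to be the driver's constant for that address.  A nonzero return means the
 * SCR could not be issued.
 *
 *	if (lpfc_issue_els_scr(vport, SCR_DID, 0))
 *		return;
 */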
2550
2551 /**
2552 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
2553 * @vport: pointer to a host virtual N_Port data structure.
2554 * @nportid: N_Port identifier to the remote node.
2555 * @retry: number of retries to the command IOCB.
2556 *
2557 * This routine issues a Fibre Channel Address Resolution Response
2558 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2559 * is passed into the function. It first search the @vport node list to find
2560 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2561 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2562 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2563 *
2564 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2565 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2566 * will be stored into the context1 field of the IOCB for the completion
2567 * callback function to the FARPR ELS command.
2568 *
2569 * Return code
2570 * 0 - Successfully issued farpr command
2571 * 1 - Failed to issue farpr command
2572 **/
2573 static int
2574 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2575 {
2576 struct lpfc_hba *phba = vport->phba;
2577 IOCB_t *icmd;
2578 struct lpfc_iocbq *elsiocb;
2579 struct lpfc_sli *psli;
2580 FARP *fp;
2581 uint8_t *pcmd;
2582 uint32_t *lp;
2583 uint16_t cmdsize;
2584 struct lpfc_nodelist *ondlp;
2585 struct lpfc_nodelist *ndlp;
2586
2587 psli = &phba->sli;
2588 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2589
2590 ndlp = lpfc_findnode_did(vport, nportid);
2591 if (!ndlp) {
2592 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2593 if (!ndlp)
2594 return 1;
2595 lpfc_nlp_init(vport, ndlp, nportid);
2596 lpfc_enqueue_node(vport, ndlp);
2597 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2598 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2599 if (!ndlp)
2600 return 1;
2601 }
2602
2603 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2604 ndlp->nlp_DID, ELS_CMD_RNID);
2605 if (!elsiocb) {
2606 /* This will trigger the release of the node just
2607 * allocated
2608 */
2609 lpfc_nlp_put(ndlp);
2610 return 1;
2611 }
2612
2613 icmd = &elsiocb->iocb;
2614 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2615
2616 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2617 pcmd += sizeof(uint32_t);
2618
2619 /* Fill in FARPR payload */
2620 fp = (FARP *) (pcmd);
2621 memset(fp, 0, sizeof(FARP));
2622 lp = (uint32_t *) pcmd;
2623 *lp++ = be32_to_cpu(nportid);
2624 *lp++ = be32_to_cpu(vport->fc_myDID);
2625 fp->Rflags = 0;
2626 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2627
2628 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2629 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2630 ondlp = lpfc_findnode_did(vport, nportid);
2631 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2632 memcpy(&fp->OportName, &ondlp->nlp_portname,
2633 sizeof(struct lpfc_name));
2634 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2635 sizeof(struct lpfc_name));
2636 }
2637
2638 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2639 "Issue FARPR: did:x%x",
2640 ndlp->nlp_DID, 0, 0);
2641
2642 phba->fc_stat.elsXmitFARPR++;
2643 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2644 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2645 IOCB_ERROR) {
2646 /* The additional lpfc_nlp_put will cause the following
2647 * lpfc_els_free_iocb routine to trigger the release of
2648 * the node.
2649 */
2650 lpfc_nlp_put(ndlp);
2651 lpfc_els_free_iocb(phba, elsiocb);
2652 return 1;
2653 }
2654 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2655 * trigger the release of the node.
2656 */
2657 lpfc_nlp_put(ndlp);
2658 return 0;
2659 }
2660
2661 /**
2662 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
2663 * @vport: pointer to a host virtual N_Port data structure.
2664 * @nlp: pointer to a node-list data structure.
2665 *
2666 * This routine cancels the timer with a delayed IOCB-command retry for
2667 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
2668 * removes the ELS retry event if it presents. In addition, if the
2669 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2670 * commands are sent for the @vport's nodes that require issuing discovery
2671 * ADISC.
2672 **/
2673 void
2674 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2675 {
2676 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2677 struct lpfc_work_evt *evtp;
2678
2679 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2680 return;
2681 spin_lock_irq(shost->host_lock);
2682 nlp->nlp_flag &= ~NLP_DELAY_TMO;
2683 spin_unlock_irq(shost->host_lock);
2684 del_timer_sync(&nlp->nlp_delayfunc);
2685 nlp->nlp_last_elscmd = 0;
2686 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2687 list_del_init(&nlp->els_retry_evt.evt_listp);
2688 /* Decrement nlp reference count held for the delayed retry */
2689 evtp = &nlp->els_retry_evt;
2690 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2691 }
2692 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2693 spin_lock_irq(shost->host_lock);
2694 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2695 spin_unlock_irq(shost->host_lock);
2696 if (vport->num_disc_nodes) {
2697 if (vport->port_state < LPFC_VPORT_READY) {
2698 /* Check if there are more ADISCs to be sent */
2699 lpfc_more_adisc(vport);
2700 } else {
2701 /* Check if there are more PLOGIs to be sent */
2702 lpfc_more_plogi(vport);
2703 if (vport->num_disc_nodes == 0) {
2704 spin_lock_irq(shost->host_lock);
2705 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2706 spin_unlock_irq(shost->host_lock);
2707 lpfc_can_disctmo(vport);
2708 lpfc_end_rscn(vport);
2709 }
2710 }
2711 }
2712 }
2713 return;
2714 }
2715
2716 /**
2717 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
2718 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2719 *
2720 * This routine is invoked by the ndlp delayed-function timer to check
2721 * whether there is any pending ELS retry event(s) with the node. If not, it
2722 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2723 * adds the delayed events to the HBA work list and invokes the
2724 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2725 * event. Note that lpfc_nlp_get() is called before posting the event to
2726 * the work list to hold reference count of ndlp so that it guarantees the
2727 * reference to ndlp will still be available when the worker thread gets
2728 * to the event associated with the ndlp.
2729 **/
2730 void
2731 lpfc_els_retry_delay(unsigned long ptr)
2732 {
2733 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2734 struct lpfc_vport *vport = ndlp->vport;
2735 struct lpfc_hba *phba = vport->phba;
2736 unsigned long flags;
2737 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
2738
2739 spin_lock_irqsave(&phba->hbalock, flags);
2740 if (!list_empty(&evtp->evt_listp)) {
2741 spin_unlock_irqrestore(&phba->hbalock, flags);
2742 return;
2743 }
2744
2745 /* We need to hold the node by incrementing the reference
2746 * count until the queued work is done
2747 */
2748 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
2749 if (evtp->evt_arg1) {
2750 evtp->evt = LPFC_EVT_ELS_RETRY;
2751 list_add_tail(&evtp->evt_listp, &phba->work_list);
2752 lpfc_worker_wake_up(phba);
2753 }
2754 spin_unlock_irqrestore(&phba->hbalock, flags);
2755 return;
2756 }
2757
2758 /**
2759 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
2760 * @ndlp: pointer to a node-list data structure.
2761 *
2762 * This routine is the worker-thread handler for processing the @ndlp delayed
2763 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2764 * the last ELS command from the associated ndlp and invokes the proper ELS
2765 * function according to the delayed ELS command to retry the command.
2766 **/
2767 void
2768 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2769 {
2770 struct lpfc_vport *vport = ndlp->vport;
2771 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2772 uint32_t cmd, did, retry;
2773
2774 spin_lock_irq(shost->host_lock);
2775 did = ndlp->nlp_DID;
2776 cmd = ndlp->nlp_last_elscmd;
2777 ndlp->nlp_last_elscmd = 0;
2778
2779 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2780 spin_unlock_irq(shost->host_lock);
2781 return;
2782 }
2783
2784 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2785 spin_unlock_irq(shost->host_lock);
2786 /*
2787 * If a discovery event readded nlp_delayfunc after timer
2788 * firing and before processing the timer, cancel the
2789 * nlp_delayfunc.
2790 */
2791 del_timer_sync(&ndlp->nlp_delayfunc);
2792 retry = ndlp->nlp_retry;
2793 ndlp->nlp_retry = 0;
2794
2795 switch (cmd) {
2796 case ELS_CMD_FLOGI:
2797 lpfc_issue_els_flogi(vport, ndlp, retry);
2798 break;
2799 case ELS_CMD_PLOGI:
2800 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
2801 ndlp->nlp_prev_state = ndlp->nlp_state;
2802 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2803 }
2804 break;
2805 case ELS_CMD_ADISC:
2806 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
2807 ndlp->nlp_prev_state = ndlp->nlp_state;
2808 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2809 }
2810 break;
2811 case ELS_CMD_PRLI:
2812 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
2813 ndlp->nlp_prev_state = ndlp->nlp_state;
2814 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2815 }
2816 break;
2817 case ELS_CMD_LOGO:
2818 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2819 ndlp->nlp_prev_state = ndlp->nlp_state;
2820 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2821 }
2822 break;
2823 case ELS_CMD_FDISC:
2824 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
2825 lpfc_issue_els_fdisc(vport, ndlp, retry);
2826 break;
2827 }
2828 return;
2829 }
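
/*
 * A minimal sketch of how the delayed retry consumed above is armed,
 * following the pattern used by lpfc_els_retry() below: the caller starts
 * nlp_delayfunc, marks the node, and records the command to replay, so the
 * timer expiry eventually queues lpfc_els_retry_delay_handler() via the
 * worker thread.  The 1000 ms delay here is illustrative.
 *
 *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 *	spin_lock_irq(shost->host_lock);
 *	ndlp->nlp_flag |= NLP_DELAY_TMO;
 *	spin_unlock_irq(shost->host_lock);
 *	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 */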
2830
2831 /**
2832 * lpfc_els_retry - Make retry decision on an els command iocb
2833 * @phba: pointer to lpfc hba data structure.
2834 * @cmdiocb: pointer to lpfc command iocb data structure.
2835 * @rspiocb: pointer to lpfc response iocb data structure.
2836 *
2837 * This routine makes a retry decision on an ELS command IOCB, which has
2838 * failed. The following ELS IOCBs use this function for retrying the command
2839 * when a previously issued command responded with an error status: FLOGI, PLOGI,
2840 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2841 * returned error status, it makes the decision whether a retry shall be
2842 * issued for the command, and whether a retry shall be made immediately or
2843 * delayed. In the former case, the corresponding ELS command issuing-function
2844 * is called to retry the command. In the latter case, the ELS command shall
2845 * be posted to the ndlp delayed event and the delayed function timer set on
2846 * the ndlp for the delayed command issuing.
2847 *
2848 * Return code
2849 * 0 - No retry of els command is made
2850 * 1 - Immediate or delayed retry of els command is made
2851 **/
2852 static int
2853 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2854 struct lpfc_iocbq *rspiocb)
2855 {
2856 struct lpfc_vport *vport = cmdiocb->vport;
2857 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2858 IOCB_t *irsp = &rspiocb->iocb;
2859 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2860 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2861 uint32_t *elscmd;
2862 struct ls_rjt stat;
2863 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
2864 int logerr = 0;
2865 uint32_t cmd = 0;
2866 uint32_t did;
2867
2868
2869 /* Note: context2 may be 0 for internal driver abort
2870 * of a delayed ELS command.
2871 */
2872
2873 if (pcmd && pcmd->virt) {
2874 elscmd = (uint32_t *) (pcmd->virt);
2875 cmd = *elscmd++;
2876 }
2877
2878 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2879 did = ndlp->nlp_DID;
2880 else {
2881 /* We should only hit this case for retrying PLOGI */
2882 did = irsp->un.elsreq64.remoteID;
2883 ndlp = lpfc_findnode_did(vport, did);
2884 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2885 && (cmd != ELS_CMD_PLOGI))
2886 return 1;
2887 }
2888
2889 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2890 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
2891 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2892
2893 switch (irsp->ulpStatus) {
2894 case IOSTAT_FCP_RSP_ERROR:
2895 break;
2896 case IOSTAT_REMOTE_STOP:
2897 if (phba->sli_rev == LPFC_SLI_REV4) {
2898 /* This IO was aborted by the target; we don't
2899 * know the rxid, and because we did not send the
2900 * ABTS we cannot generate an RRQ.
2901 */
2902 lpfc_set_rrq_active(phba, ndlp,
2903 cmdiocb->sli4_xritag, 0, 0);
2904 }
2905 break;
2906 case IOSTAT_LOCAL_REJECT:
2907 switch ((irsp->un.ulpWord[4] & 0xff)) {
2908 case IOERR_LOOP_OPEN_FAILURE:
2909 if (cmd == ELS_CMD_FLOGI) {
2910 if (PCI_DEVICE_ID_HORNET ==
2911 phba->pcidev->device) {
2912 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
2913 phba->pport->fc_myDID = 0;
2914 phba->alpa_map[0] = 0;
2915 phba->alpa_map[1] = 0;
2916 }
2917 }
2918 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2919 delay = 1000;
2920 retry = 1;
2921 break;
2922
2923 case IOERR_ILLEGAL_COMMAND:
2924 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2925 "0124 Retry illegal cmd x%x "
2926 "retry:x%x delay:x%x\n",
2927 cmd, cmdiocb->retry, delay);
2928 retry = 1;
2929 /* All commands' retry policy */
2930 maxretry = 8;
2931 if (cmdiocb->retry > 2)
2932 delay = 1000;
2933 break;
2934
2935 case IOERR_NO_RESOURCES:
2936 logerr = 1; /* HBA out of resources */
2937 retry = 1;
2938 if (cmdiocb->retry > 100)
2939 delay = 100;
2940 maxretry = 250;
2941 break;
2942
2943 case IOERR_ILLEGAL_FRAME:
2944 delay = 100;
2945 retry = 1;
2946 break;
2947
2948 case IOERR_SEQUENCE_TIMEOUT:
2949 case IOERR_INVALID_RPI:
2950 retry = 1;
2951 break;
2952 }
2953 break;
2954
2955 case IOSTAT_NPORT_RJT:
2956 case IOSTAT_FABRIC_RJT:
2957 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2958 retry = 1;
2959 break;
2960 }
2961 break;
2962
2963 case IOSTAT_NPORT_BSY:
2964 case IOSTAT_FABRIC_BSY:
2965 logerr = 1; /* Fabric / Remote NPort out of resources */
2966 retry = 1;
2967 break;
2968
2969 case IOSTAT_LS_RJT:
2970 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2971 /* Added for Vendor specific support
2972 * Just keep retrying for these Rsn / Exp codes
2973 */
2974 switch (stat.un.b.lsRjtRsnCode) {
2975 case LSRJT_UNABLE_TPC:
2976 if (stat.un.b.lsRjtRsnCodeExp ==
2977 LSEXP_CMD_IN_PROGRESS) {
2978 if (cmd == ELS_CMD_PLOGI) {
2979 delay = 1000;
2980 maxretry = 48;
2981 }
2982 retry = 1;
2983 break;
2984 }
2985 if (stat.un.b.lsRjtRsnCodeExp ==
2986 LSEXP_CANT_GIVE_DATA) {
2987 if (cmd == ELS_CMD_PLOGI) {
2988 delay = 1000;
2989 maxretry = 48;
2990 }
2991 retry = 1;
2992 break;
2993 }
2994 if (cmd == ELS_CMD_PLOGI) {
2995 delay = 1000;
2996 maxretry = lpfc_max_els_tries + 1;
2997 retry = 1;
2998 break;
2999 }
3000 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3001 (cmd == ELS_CMD_FDISC) &&
3002 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3003 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3004 "0125 FDISC Failed (x%x). "
3005 "Fabric out of resources\n",
3006 stat.un.lsRjtError);
3007 lpfc_vport_set_state(vport,
3008 FC_VPORT_NO_FABRIC_RSCS);
3009 }
3010 break;
3011
3012 case LSRJT_LOGICAL_BSY:
3013 if ((cmd == ELS_CMD_PLOGI) ||
3014 (cmd == ELS_CMD_PRLI)) {
3015 delay = 1000;
3016 maxretry = 48;
3017 } else if (cmd == ELS_CMD_FDISC) {
3018 /* FDISC retry policy */
3019 maxretry = 48;
3020 if (cmdiocb->retry >= 32)
3021 delay = 1000;
3022 }
3023 retry = 1;
3024 break;
3025
3026 case LSRJT_LOGICAL_ERR:
3027 /* There are some cases where switches return this
3028 * error when they are not ready and should be returning
3029 * Logical Busy. We should delay every time.
3030 */
3031 if (cmd == ELS_CMD_FDISC &&
3032 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3033 maxretry = 3;
3034 delay = 1000;
3035 retry = 1;
3036 break;
3037 }
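/* fall through */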
3038 case LSRJT_PROTOCOL_ERR:
3039 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3040 (cmd == ELS_CMD_FDISC) &&
3041 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3042 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3043 ) {
3044 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3045 "0122 FDISC Failed (x%x). "
3046 "Fabric Detected Bad WWN\n",
3047 stat.un.lsRjtError);
3048 lpfc_vport_set_state(vport,
3049 FC_VPORT_FABRIC_REJ_WWN);
3050 }
3051 break;
3052 }
3053 break;
3054
3055 case IOSTAT_INTERMED_RSP:
3056 case IOSTAT_BA_RJT:
3057 break;
3058
3059 default:
3060 break;
3061 }
3062
3063 if (did == FDMI_DID)
3064 retry = 1;
3065
3066 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
3067 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3068 !lpfc_error_lost_link(irsp)) {
3069 /* FLOGI retry policy */
3070 retry = 1;
3071 /* retry forever */
3072 maxretry = 0;
3073 if (cmdiocb->retry >= 100)
3074 delay = 5000;
3075 else if (cmdiocb->retry >= 32)
3076 delay = 1000;
3077 }
3078
3079 cmdiocb->retry++;
3080 if (maxretry && (cmdiocb->retry >= maxretry)) {
3081 phba->fc_stat.elsRetryExceeded++;
3082 retry = 0;
3083 }
3084
3085 if ((vport->load_flag & FC_UNLOADING) != 0)
3086 retry = 0;
3087
3088 if (retry) {
3089 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3090 /* Stop retrying PLOGI and FDISC if in FCF discovery */
3091 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3093 "2849 Stop retry ELS command "
3094 "x%x to remote NPORT x%x, "
3095 "Data: x%x x%x\n", cmd, did,
3096 cmdiocb->retry, delay);
3097 return 0;
3098 }
3099 }
3100
3101 /* Retry ELS command <elsCmd> to remote NPORT <did> */
3102 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3103 "0107 Retry ELS command x%x to remote "
3104 "NPORT x%x Data: x%x x%x\n",
3105 cmd, did, cmdiocb->retry, delay);
3106
3107 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3108 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3109 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
3110 /* Don't reset timer for no resources */
3111
3112 /* If discovery / RSCN timer is running, reset it */
3113 if (timer_pending(&vport->fc_disctmo) ||
3114 (vport->fc_flag & FC_RSCN_MODE))
3115 lpfc_set_disctmo(vport);
3116 }
3117
3118 phba->fc_stat.elsXmitRetry++;
3119 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
3120 phba->fc_stat.elsDelayRetry++;
3121 ndlp->nlp_retry = cmdiocb->retry;
3122
3123 /* delay is specified in milliseconds */
3124 mod_timer(&ndlp->nlp_delayfunc,
3125 jiffies + msecs_to_jiffies(delay));
3126 spin_lock_irq(shost->host_lock);
3127 ndlp->nlp_flag |= NLP_DELAY_TMO;
3128 spin_unlock_irq(shost->host_lock);
3129
3130 ndlp->nlp_prev_state = ndlp->nlp_state;
3131 if (cmd == ELS_CMD_PRLI)
3132 lpfc_nlp_set_state(vport, ndlp,
3133 NLP_STE_REG_LOGIN_ISSUE);
3134 else
3135 lpfc_nlp_set_state(vport, ndlp,
3136 NLP_STE_NPR_NODE);
3137 ndlp->nlp_last_elscmd = cmd;
3138
3139 return 1;
3140 }
3141 switch (cmd) {
3142 case ELS_CMD_FLOGI:
3143 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
3144 return 1;
3145 case ELS_CMD_FDISC:
3146 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3147 return 1;
3148 case ELS_CMD_PLOGI:
3149 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3150 ndlp->nlp_prev_state = ndlp->nlp_state;
3151 lpfc_nlp_set_state(vport, ndlp,
3152 NLP_STE_PLOGI_ISSUE);
3153 }
3154 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
3155 return 1;
3156 case ELS_CMD_ADISC:
3157 ndlp->nlp_prev_state = ndlp->nlp_state;
3158 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3159 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
3160 return 1;
3161 case ELS_CMD_PRLI:
3162 ndlp->nlp_prev_state = ndlp->nlp_state;
3163 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3164 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3165 return 1;
3166 case ELS_CMD_LOGO:
3167 ndlp->nlp_prev_state = ndlp->nlp_state;
3168 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3169 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3170 return 1;
3171 }
3172 }
3173 /* No retry ELS command <elsCmd> to remote NPORT <did> */
3174 if (logerr) {
3175 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3176 "0137 No retry ELS command x%x to remote "
3177 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3178 cmd, did, irsp->ulpStatus,
3179 irsp->un.ulpWord[4]);
3180 }
3181 else {
3182 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3183 "0108 No retry ELS command x%x to remote "
3184 "NPORT x%x Retried:%d Error:x%x/%x\n",
3185 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3186 irsp->un.ulpWord[4]);
3187 }
3188 return 0;
3189 }
3190
3191 /**
3192 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
3193 * @phba: pointer to lpfc hba data structure.
3194 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3195 *
3196 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3197 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3198 * checks to see whether there is a lpfc DMA buffer associated with the
3199 * response of the command IOCB. If so, it will be released before releasing
3200 * the lpfc DMA buffer associated with the IOCB itself.
3201 *
3202 * Return code
3203 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3204 **/
3205 static int
3206 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3207 {
3208 struct lpfc_dmabuf *buf_ptr;
3209
3210 /* Free the response before processing the command. */
3211 if (!list_empty(&buf_ptr1->list)) {
3212 list_remove_head(&buf_ptr1->list, buf_ptr,
3213 struct lpfc_dmabuf,
3214 list);
3215 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3216 kfree(buf_ptr);
3217 }
3218 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3219 kfree(buf_ptr1);
3220 return 0;
3221 }
3222
3223 /**
3224 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
3225 * @phba: pointer to lpfc hba data structure.
3226 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3227 *
3228 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3229 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3230 * pool.
3231 *
3232 * Return code
3233 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
3234 **/
3235 static int
3236 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3237 {
3238 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3239 kfree(buf_ptr);
3240 return 0;
3241 }
3242
3243 /**
3244 * lpfc_els_free_iocb - Free a command iocb and its associated resources
3245 * @phba: pointer to lpfc hba data structure.
3246 * @elsiocb: pointer to lpfc els command iocb data structure.
3247 *
3248 * This routine frees a command IOCB and its associated resources. The
3249 * command IOCB data structure contains the reference to various associated
3250 * resources; these fields must be set to NULL if the associated reference
3251 * is not present:
3252 * context1 - reference to ndlp
3253 * context2 - reference to cmd
3254 * context2->next - reference to rsp
3255 * context3 - reference to bpl
3256 *
3257 * It first properly decrements the reference count held on ndlp for the
3258 * IOCB completion callback function. If the LPFC_DELAY_MEM_FREE flag is not
3259 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3260 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3261 * adds the DMA buffer to the @phba data structure for delayed release.
3262 * If reference to the Buffer Pointer List (BPL) is present, the
3263 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3264 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3265 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3266 *
3267 * Return code
3268 * 0 - Success (currently, always returns 0)
3269 **/
3270 int
3271 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3272 {
3273 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3274 struct lpfc_nodelist *ndlp;
3275
3276 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3277 if (ndlp) {
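		/* Node removal was deferred to this ELS completion: drop the
		 * reference held for this IOCB first, then free the ndlp
		 * below if no other discovery thread is still using it.
		 */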
3278 if (ndlp->nlp_flag & NLP_DEFER_RM) {
3279 lpfc_nlp_put(ndlp);
3280
3281 /* If the ndlp is not being used by another discovery
3282 * thread, free it.
3283 */
3284 if (!lpfc_nlp_not_used(ndlp)) {
3285 /* If ndlp is being used by another discovery
3286 * thread, just clear NLP_DEFER_RM
3287 */
3288 ndlp->nlp_flag &= ~NLP_DEFER_RM;
3289 }
3290 }
3291 else
3292 lpfc_nlp_put(ndlp);
3293 elsiocb->context1 = NULL;
3294 }
3295 /* context2 = cmd, context2->next = rsp, context3 = bpl */
3296 if (elsiocb->context2) {
3297 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3298 			/* The firmware could still be DMAing the payload,
3299 			 * so don't free the data buffer until after a
3300 			 * heartbeat.
3301 			 */
3302 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3303 buf_ptr = elsiocb->context2;
3304 elsiocb->context2 = NULL;
3305 if (buf_ptr) {
3306 buf_ptr1 = NULL;
3307 spin_lock_irq(&phba->hbalock);
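				/* Queue the rsp buffer (if chained) and the
				 * cmd buffer on phba->elsbuf so they are
				 * freed later, after the delay.
				 */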
3308 if (!list_empty(&buf_ptr->list)) {
3309 list_remove_head(&buf_ptr->list,
3310 buf_ptr1, struct lpfc_dmabuf,
3311 list);
3312 INIT_LIST_HEAD(&buf_ptr1->list);
3313 list_add_tail(&buf_ptr1->list,
3314 &phba->elsbuf);
3315 phba->elsbuf_cnt++;
3316 }
3317 INIT_LIST_HEAD(&buf_ptr->list);
3318 list_add_tail(&buf_ptr->list, &phba->elsbuf);
3319 phba->elsbuf_cnt++;
3320 spin_unlock_irq(&phba->hbalock);
3321 }
3322 } else {
3323 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3324 lpfc_els_free_data(phba, buf_ptr1);
3325 }
3326 }
3327
3328 if (elsiocb->context3) {
3329 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3330 lpfc_els_free_bpl(phba, buf_ptr);
3331 }
3332 lpfc_sli_release_iocbq(phba, elsiocb);
3333 return 0;
3334 }
3335
3336 /**
3337 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3338 * @phba: pointer to lpfc hba data structure.
3339 * @cmdiocb: pointer to lpfc command iocb data structure.
3340 * @rspiocb: pointer to lpfc response iocb data structure.
3341 *
3342 * This routine is the completion callback function to the Logout (LOGO)
3343 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3344 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3345 * release the ndlp if it has the last reference remaining (reference count
3346 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
3347 * field to NULL to inform the following lpfc_els_free_iocb() routine no
3348 * ndlp reference count needs to be decremented. Otherwise, the ndlp
3349 * reference use-count shall be decremented by the lpfc_els_free_iocb()
3350 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3351 * IOCB data structure.
3352 **/
3353 static void
3354 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3355 struct lpfc_iocbq *rspiocb)
3356 {
3357 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3358 struct lpfc_vport *vport = cmdiocb->vport;
3359 IOCB_t *irsp;
3360
3361 irsp = &rspiocb->iocb;
3362 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3363 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3364 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3365 /* ACC to LOGO completes to NPort <nlp_DID> */
3366 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3367 "0109 ACC to LOGO completes to NPort x%x "
3368 "Data: x%x x%x x%x\n",
3369 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3370 ndlp->nlp_rpi);
3371
3372 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3373 /* NPort Recovery mode or node is just allocated */
3374 if (!lpfc_nlp_not_used(ndlp)) {
3375 /* If the ndlp is being used by another discovery
3376 * thread, just unregister the RPI.
3377 */
3378 lpfc_unreg_rpi(vport, ndlp);
3379 } else {
3380 			/* Indicate the node has already been released; do
3381 			 * not reference it from within lpfc_els_free_iocb.
3382 */
3383 cmdiocb->context1 = NULL;
3384 }
3385 }
3386 lpfc_els_free_iocb(phba, cmdiocb);
3387 return;
3388 }
3389
3390 /**
3391 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3392 * @phba: pointer to lpfc hba data structure.
3393 * @pmb: pointer to the driver internal queue element for mailbox command.
3394 *
3395 * This routine is the completion callback function for unregister default
3396 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3397 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3398 * decrements the ndlp reference count held for this completion callback
3399 * function. After that, it invokes the lpfc_nlp_not_used() to check
3400 * whether there is only one reference left on the ndlp. If so, it will
3401 * perform one more decrement and trigger the release of the ndlp.
3402 **/
3403 void
3404 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3405 {
3406 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3407 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3408
3409 pmb->context1 = NULL;
3410 pmb->context2 = NULL;
3411
3412 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3413 kfree(mp);
3414 mempool_free(pmb, phba->mbox_mem_pool);
3415 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3416 lpfc_nlp_put(ndlp);
3417 /* This is the end of the default RPI cleanup logic for this
3418 		 * ndlp. If no other discovery threads are using this ndlp,
3419 		 * we should free all resources associated with it.
3420 */
3421 lpfc_nlp_not_used(ndlp);
3422 }
3423
3424 return;
3425 }
3426
3427 /**
3428 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3429 * @phba: pointer to lpfc hba data structure.
3430 * @cmdiocb: pointer to lpfc command iocb data structure.
3431 * @rspiocb: pointer to lpfc response iocb data structure.
3432 *
3433 * This routine is the completion callback function for ELS Response IOCB
3434 * command. In the normal case, this callback function properly sets the
3435 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
3436 * field in the command IOCB is not NULL, the referred mailbox command will
3437 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to
3438 * release the IOCB. Under error conditions, such as when an LS_RJT is
3439 * returned or a link down event occurred during the discovery, the
3440 * lpfc_nlp_not_used() routine shall be invoked to try to release the ndlp
3441 * if no other threads are currently referencing it.
3442 **/
3443 static void
3444 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3445 struct lpfc_iocbq *rspiocb)
3446 {
3447 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3448 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3449 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3450 IOCB_t *irsp;
3451 uint8_t *pcmd;
3452 LPFC_MBOXQ_t *mbox = NULL;
3453 struct lpfc_dmabuf *mp = NULL;
3454 uint32_t ls_rjt = 0;
3455
3456 irsp = &rspiocb->iocb;
3457
3458 if (cmdiocb->context_un.mbox)
3459 mbox = cmdiocb->context_un.mbox;
3460
3461 	/* First determine if this is an LS_RJT cmpl. Note, this callback
3462 	 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3463 */
3464 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3465 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3466 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3467 /* A LS_RJT associated with Default RPI cleanup has its own
3468 * separate code path.
3469 */
3470 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3471 ls_rjt = 1;
3472 }
3473
3474 /* Check to see if link went down during discovery */
3475 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3476 if (mbox) {
3477 mp = (struct lpfc_dmabuf *) mbox->context1;
3478 if (mp) {
3479 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3480 kfree(mp);
3481 }
3482 mempool_free(mbox, phba->mbox_mem_pool);
3483 }
3484 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3485 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3486 if (lpfc_nlp_not_used(ndlp)) {
3487 ndlp = NULL;
3488 				/* Indicate the node has already been
3489 				 * released; do not reference it from within
3490 				 * the routine lpfc_els_free_iocb.
3491 */
3492 cmdiocb->context1 = NULL;
3493 }
3494 goto out;
3495 }
3496
3497 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3498 "ELS rsp cmpl: status:x%x/x%x did:x%x",
3499 irsp->ulpStatus, irsp->un.ulpWord[4],
3500 cmdiocb->iocb.un.elsreq64.remoteID);
3501 /* ELS response tag <ulpIoTag> completes */
3502 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3503 "0110 ELS response tag x%x completes "
3504 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3505 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3506 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3507 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3508 ndlp->nlp_rpi);
3509 if (mbox) {
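		/* A mailbox (typically REG_LOGIN prepared for an accepted
		 * PLOGI) was deferred to this completion; issue it only if
		 * the ACC response was delivered successfully.
		 */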
3510 if ((rspiocb->iocb.ulpStatus == 0)
3511 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3512 lpfc_unreg_rpi(vport, ndlp);
3513 /* Increment reference count to ndlp to hold the
3514 * reference to ndlp for the callback function.
3515 */
3516 mbox->context2 = lpfc_nlp_get(ndlp);
3517 mbox->vport = vport;
3518 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3519 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3520 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3521 }
3522 else {
3523 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3524 ndlp->nlp_prev_state = ndlp->nlp_state;
3525 lpfc_nlp_set_state(vport, ndlp,
3526 NLP_STE_REG_LOGIN_ISSUE);
3527 }
3528 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3529 != MBX_NOT_FINISHED)
3530 goto out;
3531 else
3532 /* Decrement the ndlp reference count we
3533 * set for this failed mailbox command.
3534 */
3535 lpfc_nlp_put(ndlp);
3536
3537 /* ELS rsp: Cannot issue reg_login for <NPortid> */
3538 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3539 "0138 ELS rsp: Cannot issue reg_login for x%x "
3540 "Data: x%x x%x x%x\n",
3541 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3542 ndlp->nlp_rpi);
3543
3544 if (lpfc_nlp_not_used(ndlp)) {
3545 ndlp = NULL;
3546 			/* Indicate the node has already been released;
3547 			 * do not reference it from within
3548 			 * the routine lpfc_els_free_iocb.
3549 */
3550 cmdiocb->context1 = NULL;
3551 }
3552 } else {
3553 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3554 if (!lpfc_error_lost_link(irsp) &&
3555 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3556 if (lpfc_nlp_not_used(ndlp)) {
3557 ndlp = NULL;
3558 				/* Indicate the node has already been
3559 				 * released; do not reference
3560 				 * it from within the routine
3561 				 * lpfc_els_free_iocb.
3562 */
3563 cmdiocb->context1 = NULL;
3564 }
3565 }
3566 }
3567 mp = (struct lpfc_dmabuf *) mbox->context1;
3568 if (mp) {
3569 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3570 kfree(mp);
3571 }
3572 mempool_free(mbox, phba->mbox_mem_pool);
3573 }
3574 out:
3575 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3576 spin_lock_irq(shost->host_lock);
3577 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3578 spin_unlock_irq(shost->host_lock);
3579
3580 /* If the node is not being used by another discovery thread,
3581 * and we are sending a reject, we are done with it.
3582 * Release driver reference count here and free associated
3583 * resources.
3584 */
3585 if (ls_rjt)
3586 if (lpfc_nlp_not_used(ndlp))
3587 				/* Indicate the node has already been
3588 				 * released; do not reference it from within
3589 				 * the routine lpfc_els_free_iocb.
3590 */
3591 cmdiocb->context1 = NULL;
3592 }
3593
3594 lpfc_els_free_iocb(phba, cmdiocb);
3595 return;
3596 }
3597
3598 /**
3599 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
3600 * @vport: pointer to a host virtual N_Port data structure.
3601 * @flag: the els command code to be accepted.
3602 * @oldiocb: pointer to the original lpfc command iocb data structure.
3603 * @ndlp: pointer to a node-list data structure.
3604 * @mbox: pointer to the driver internal queue element for mailbox command.
3605 *
3606 * This routine prepares and issues an Accept (ACC) response IOCB
3607 * command. It uses the @flag to properly set up the IOCB field for the
3608 * specific ACC response command to be issued and invokes the
3609 * lpfc_sli_issue_iocb() routine to send out the ACC response IOCB. If a
3610 * @mbox pointer is passed in, it will be put into the context_un.mbox
3611 * field of the IOCB for the completion callback function to issue the
3612 * mailbox command to the HBA later when the callback is invoked.
3613 *
3614 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3615 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3616 * will be stored into the context1 field of the IOCB for the completion
3617 * callback function to the corresponding response ELS IOCB command.
3618 *
3619 * Return code
3620 * 0 - Successfully issued acc response
3621 * 1 - Failed to issue acc response
3622 **/
3623 int
3624 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3625 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3626 LPFC_MBOXQ_t *mbox)
3627 {
3628 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3629 struct lpfc_hba *phba = vport->phba;
3630 IOCB_t *icmd;
3631 IOCB_t *oldcmd;
3632 struct lpfc_iocbq *elsiocb;
3633 struct lpfc_sli *psli;
3634 uint8_t *pcmd;
3635 uint16_t cmdsize;
3636 int rc;
3637 ELS_PKT *els_pkt_ptr;
3638
3639 psli = &phba->sli;
3640 oldcmd = &oldiocb->iocb;
3641
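	/* The ACC payload layout depends on which ELS command is accepted */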
3642 switch (flag) {
3643 case ELS_CMD_ACC:
3644 cmdsize = sizeof(uint32_t);
3645 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3646 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3647 if (!elsiocb) {
3648 spin_lock_irq(shost->host_lock);
3649 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3650 spin_unlock_irq(shost->host_lock);
3651 return 1;
3652 }
3653
3654 icmd = &elsiocb->iocb;
3655 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3656 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3657 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3658 pcmd += sizeof(uint32_t);
3659
3660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3661 "Issue ACC: did:x%x flg:x%x",
3662 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3663 break;
3664 case ELS_CMD_PLOGI:
3665 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3666 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3667 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3668 if (!elsiocb)
3669 return 1;
3670
3671 icmd = &elsiocb->iocb;
3672 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3673 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3674
3675 if (mbox)
3676 elsiocb->context_un.mbox = mbox;
3677
3678 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3679 pcmd += sizeof(uint32_t);
3680 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3681
3682 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3683 "Issue ACC PLOGI: did:x%x flg:x%x",
3684 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3685 break;
3686 case ELS_CMD_PRLO:
3687 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3688 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3689 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3690 if (!elsiocb)
3691 return 1;
3692
3693 icmd = &elsiocb->iocb;
3694 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3695 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3696
3697 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
3698 sizeof(uint32_t) + sizeof(PRLO));
3699 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3700 els_pkt_ptr = (ELS_PKT *) pcmd;
3701 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
3702
3703 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3704 "Issue ACC PRLO: did:x%x flg:x%x",
3705 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3706 break;
3707 default:
3708 return 1;
3709 }
3710 /* Xmit ELS ACC response tag <ulpIoTag> */
3711 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3712 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3713 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3714 elsiocb->iotag, elsiocb->iocb.ulpContext,
3715 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3716 ndlp->nlp_rpi);
3717 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3718 spin_lock_irq(shost->host_lock);
3719 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3720 spin_unlock_irq(shost->host_lock);
3721 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3722 } else {
3723 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3724 }
3725
3726 phba->fc_stat.elsXmitACC++;
3727 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3728 if (rc == IOCB_ERROR) {
3729 lpfc_els_free_iocb(phba, elsiocb);
3730 return 1;
3731 }
3732 return 0;
3733 }
3734
3735 /**
3736 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
3737 * @vport: pointer to a virtual N_Port data structure.
3738 * @rejectError: reject reason and explanation codes returned in the LS_RJT.
3739 * @oldiocb: pointer to the original lpfc command iocb data structure.
3740 * @ndlp: pointer to a node-list data structure.
3741 * @mbox: pointer to the driver internal queue element for mailbox command.
3742 *
3743 * This routine prepares and issues a Reject (RJT) response IOCB
3744 * command. If a @mbox pointer is passed in, it will be put into the
3745 * context_un.mbox field of the IOCB for the completion callback function
3746 * to issue to the HBA later.
3747 *
3748 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3749 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3750 * will be stored into the context1 field of the IOCB for the completion
3751 * callback function to the reject response ELS IOCB command.
3752 *
3753 * Return code
3754 * 0 - Successfully issued reject response
3755 * 1 - Failed to issue reject response
3756 **/
3757 int
3758 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3759 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3760 LPFC_MBOXQ_t *mbox)
3761 {
3762 struct lpfc_hba *phba = vport->phba;
3763 IOCB_t *icmd;
3764 IOCB_t *oldcmd;
3765 struct lpfc_iocbq *elsiocb;
3766 struct lpfc_sli *psli;
3767 uint8_t *pcmd;
3768 uint16_t cmdsize;
3769 int rc;
3770
3771 psli = &phba->sli;
3772 cmdsize = 2 * sizeof(uint32_t);
3773 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3774 ndlp->nlp_DID, ELS_CMD_LS_RJT);
3775 if (!elsiocb)
3776 return 1;
3777
3778 icmd = &elsiocb->iocb;
3779 oldcmd = &oldiocb->iocb;
3780 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3781 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3782
3783 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3784 pcmd += sizeof(uint32_t);
3785 *((uint32_t *) (pcmd)) = rejectError;
3786
3787 if (mbox)
3788 elsiocb->context_un.mbox = mbox;
3789
3790 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
3791 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3792 "0129 Xmit ELS RJT x%x response tag x%x "
3793 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3794 "rpi x%x\n",
3795 rejectError, elsiocb->iotag,
3796 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3797 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3798 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3799 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
3800 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3801
3802 phba->fc_stat.elsXmitLSRJT++;
3803 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3804 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3805
3806 if (rc == IOCB_ERROR) {
3807 lpfc_els_free_iocb(phba, elsiocb);
3808 return 1;
3809 }
3810 return 0;
3811 }
3812
3813 /**
3814 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
3815 * @vport: pointer to a virtual N_Port data structure.
3816 * @oldiocb: pointer to the original lpfc command iocb data structure.
3817 * @ndlp: pointer to a node-list data structure.
3818 *
3819 * This routine prepares and issues an Accept (ACC) response to Address
3820 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3821 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3822 *
3823 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3824 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3825 * will be stored into the context1 field of the IOCB for the completion
3826 * callback function to the ADISC Accept response ELS IOCB command.
3827 *
3828 * Return code
3829 * 0 - Successfully issued acc adisc response
3830 * 1 - Failed to issue adisc acc response
3831 **/
3832 int
3833 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3834 struct lpfc_nodelist *ndlp)
3835 {
3836 struct lpfc_hba *phba = vport->phba;
3837 ADISC *ap;
3838 IOCB_t *icmd, *oldcmd;
3839 struct lpfc_iocbq *elsiocb;
3840 uint8_t *pcmd;
3841 uint16_t cmdsize;
3842 int rc;
3843
3844 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
3845 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3846 ndlp->nlp_DID, ELS_CMD_ACC);
3847 if (!elsiocb)
3848 return 1;
3849
3850 icmd = &elsiocb->iocb;
3851 oldcmd = &oldiocb->iocb;
3852 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3853
3854 /* Xmit ADISC ACC response tag <ulpIoTag> */
3855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3856 "0130 Xmit ADISC ACC response iotag x%x xri: "
3857 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3858 elsiocb->iotag, elsiocb->iocb.ulpContext,
3859 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3860 ndlp->nlp_rpi);
3861 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3862
3863 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3864 pcmd += sizeof(uint32_t);
3865
3866 ap = (ADISC *) (pcmd);
3867 ap->hardAL_PA = phba->fc_pref_ALPA;
3868 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3869 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3870 ap->DID = be32_to_cpu(vport->fc_myDID);
3871
3872 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3873 "Issue ACC ADISC: did:x%x flg:x%x",
3874 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3875
3876 phba->fc_stat.elsXmitACC++;
3877 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3878 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3879 if (rc == IOCB_ERROR) {
3880 lpfc_els_free_iocb(phba, elsiocb);
3881 return 1;
3882 }
3883 return 0;
3884 }
3885
3886 /**
3887 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
3888 * @vport: pointer to a virtual N_Port data structure.
3889 * @oldiocb: pointer to the original lpfc command iocb data structure.
3890 * @ndlp: pointer to a node-list data structure.
3891 *
3892 * This routine prepares and issues an Accept (ACC) response to Process
3893 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3894 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3895 *
3896 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3897 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3898 * will be stored into the context1 field of the IOCB for the completion
3899 * callback function to the PRLI Accept response ELS IOCB command.
3900 *
3901 * Return code
3902 * 0 - Successfully issued acc prli response
3903 * 1 - Failed to issue acc prli response
3904 **/
3905 int
3906 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3907 struct lpfc_nodelist *ndlp)
3908 {
3909 struct lpfc_hba *phba = vport->phba;
3910 PRLI *npr;
3911 lpfc_vpd_t *vpd;
3912 IOCB_t *icmd;
3913 IOCB_t *oldcmd;
3914 struct lpfc_iocbq *elsiocb;
3915 struct lpfc_sli *psli;
3916 uint8_t *pcmd;
3917 uint16_t cmdsize;
3918 int rc;
3919
3920 psli = &phba->sli;
3921
3922 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3923 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3924 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
3925 if (!elsiocb)
3926 return 1;
3927
3928 icmd = &elsiocb->iocb;
3929 oldcmd = &oldiocb->iocb;
3930 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3931 /* Xmit PRLI ACC response tag <ulpIoTag> */
3932 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3933 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3934 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3935 elsiocb->iotag, elsiocb->iocb.ulpContext,
3936 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3937 ndlp->nlp_rpi);
3938 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3939
3940 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
3941 pcmd += sizeof(uint32_t);
3942
3943 /* For PRLI, remainder of payload is PRLI parameter page */
3944 memset(pcmd, 0, sizeof(PRLI));
3945
3946 npr = (PRLI *) pcmd;
3947 vpd = &phba->vpd;
3948 /*
3949 * If the remote port is a target and our firmware version is 3.20 or
3950 * later, set the following bits for FC-TAPE support.
3951 */
3952 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3953 (vpd->rev.feaLevelHigh >= 0x02)) {
3954 npr->ConfmComplAllowed = 1;
3955 npr->Retry = 1;
3956 npr->TaskRetryIdReq = 1;
3957 }
3958
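	/* Accept the PRLI as an FCP initiator and establish the image pair */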
3959 npr->acceptRspCode = PRLI_REQ_EXECUTED;
3960 npr->estabImagePair = 1;
3961 npr->readXferRdyDis = 1;
3962 npr->ConfmComplAllowed = 1;
3963
3964 npr->prliType = PRLI_FCP_TYPE;
3965 npr->initiatorFunc = 1;
3966
3967 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3968 "Issue ACC PRLI: did:x%x flg:x%x",
3969 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3970
3971 phba->fc_stat.elsXmitACC++;
3972 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3973
3974 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3975 if (rc == IOCB_ERROR) {
3976 lpfc_els_free_iocb(phba, elsiocb);
3977 return 1;
3978 }
3979 return 0;
3980 }
3981
3982 /**
3983 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
3984 * @vport: pointer to a virtual N_Port data structure.
3985 * @format: rnid command format.
3986 * @oldiocb: pointer to the original lpfc command iocb data structure.
3987 * @ndlp: pointer to a node-list data structure.
3988 *
3989 * This routine issues a Request Node Identification Data (RNID) Accept
3990 * (ACC) response. It constructs the RNID ACC response command according to
3991 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3992 * issue the response. Note that this command does not need to hold the ndlp
3993 * reference count for the callback. So, the ndlp reference count taken by
3994 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3995 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3996 * there is no ndlp reference available.
3997 *
3998 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3999 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4000 * will be stored into the context1 field of the IOCB for the completion
4001 * callback function. However, for the RNID Accept Response ELS command,
4002 * this is undone later by this routine after the IOCB is allocated.
4003 *
4004 * Return code
4005 * 0 - Successfully issued acc rnid response
4006 * 1 - Failed to issue acc rnid response
4007 **/
4008 static int
4009 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4010 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4011 {
4012 struct lpfc_hba *phba = vport->phba;
4013 RNID *rn;
4014 IOCB_t *icmd, *oldcmd;
4015 struct lpfc_iocbq *elsiocb;
4016 struct lpfc_sli *psli;
4017 uint8_t *pcmd;
4018 uint16_t cmdsize;
4019 int rc;
4020
4021 psli = &phba->sli;
4022 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4023 + (2 * sizeof(struct lpfc_name));
4024 if (format)
4025 cmdsize += sizeof(RNID_TOP_DISC);
4026
4027 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4028 ndlp->nlp_DID, ELS_CMD_ACC);
4029 if (!elsiocb)
4030 return 1;
4031
4032 icmd = &elsiocb->iocb;
4033 oldcmd = &oldiocb->iocb;
4034 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
4035 /* Xmit RNID ACC response tag <ulpIoTag> */
4036 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4037 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4038 elsiocb->iotag, elsiocb->iocb.ulpContext);
4039 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4040 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4041 pcmd += sizeof(uint32_t);
4042
4043 memset(pcmd, 0, sizeof(RNID));
4044 rn = (RNID *) (pcmd);
4045 rn->Format = format;
4046 rn->CommonLen = (2 * sizeof(struct lpfc_name));
4047 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4048 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
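	/* Fill in the topology-discovery data only when that format is requested */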
4049 switch (format) {
4050 case 0:
4051 rn->SpecificLen = 0;
4052 break;
4053 case RNID_TOPOLOGY_DISC:
4054 rn->SpecificLen = sizeof(RNID_TOP_DISC);
4055 memcpy(&rn->un.topologyDisc.portName,
4056 &vport->fc_portname, sizeof(struct lpfc_name));
4057 rn->un.topologyDisc.unitType = RNID_HBA;
4058 rn->un.topologyDisc.physPort = 0;
4059 rn->un.topologyDisc.attachedNodes = 0;
4060 break;
4061 default:
4062 rn->CommonLen = 0;
4063 rn->SpecificLen = 0;
4064 break;
4065 }
4066
4067 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4068 "Issue ACC RNID: did:x%x flg:x%x",
4069 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4070
4071 phba->fc_stat.elsXmitACC++;
4072 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4073 lpfc_nlp_put(ndlp);
4074 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
4075 * it could be freed */
4076
4077 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4078 if (rc == IOCB_ERROR) {
4079 lpfc_els_free_iocb(phba, elsiocb);
4080 return 1;
4081 }
4082 return 0;
4083 }
4084
4085 /**
4086 * lpfc_els_clear_rrq - Clear the exchange that this rrq describes.
4087 * @vport: pointer to a virtual N_Port data structure.
4088 * @iocb: pointer to the lpfc command iocb data structure.
4089 * @ndlp: pointer to a node-list data structure.
4090 *
4091 * Return: none
4092 **/
4093 static void
4094 lpfc_els_clear_rrq(struct lpfc_vport *vport,
4095 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4096 {
4097 struct lpfc_hba *phba = vport->phba;
4098 uint8_t *pcmd;
4099 struct RRQ *rrq;
4100 uint16_t rxid;
4101 uint16_t xri;
4102 struct lpfc_node_rrq *prrq;
4103
4104
4105 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4106 pcmd += sizeof(uint32_t);
4107 rrq = (struct RRQ *)pcmd;
4108 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4109 rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
4110
4111 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4112 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4113 " x%x x%x\n",
4114 be32_to_cpu(bf_get(rrq_did, rrq)),
4115 be16_to_cpu(bf_get(rrq_oxid, rrq)),
4116 rxid,
4117 iocb->iotag, iocb->iocb.ulpContext);
4118
4119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4120 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4121 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
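	/* If our port originated the exchange, it is identified by the OXID;
	 * otherwise use the RXID to look up the active RRQ.
	 */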
4122 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4123 xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
4124 else
4125 xri = rxid;
4126 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
4127 if (prrq)
4128 lpfc_clr_rrq_active(phba, xri, prrq);
4129 return;
4130 }
4131
4132 /**
4133 * lpfc_els_rsp_echo_acc - Issue echo acc response
4134 * @vport: pointer to a virtual N_Port data structure.
4135 * @data: pointer to echo data to return in the accept.
4136 * @oldiocb: pointer to the original lpfc command iocb data structure.
4137 * @ndlp: pointer to a node-list data structure.
4138 *
4139 * Return code
4140 * 0 - Successfully issued acc echo response
4141 * 1 - Failed to issue acc echo response
4142 **/
4143 static int
4144 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4145 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4146 {
4147 struct lpfc_hba *phba = vport->phba;
4148 struct lpfc_iocbq *elsiocb;
4149 struct lpfc_sli *psli;
4150 uint8_t *pcmd;
4151 uint16_t cmdsize;
4152 int rc;
4153
4154 psli = &phba->sli;
4155 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4156
4157 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4158 ndlp->nlp_DID, ELS_CMD_ACC);
4159 if (!elsiocb)
4160 return 1;
4161
4162 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
4163 /* Xmit ECHO ACC response tag <ulpIoTag> */
4164 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4165 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4166 elsiocb->iotag, elsiocb->iocb.ulpContext);
4167 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4168 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4169 pcmd += sizeof(uint32_t);
4170 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4171
4172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4173 "Issue ACC ECHO: did:x%x flg:x%x",
4174 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4175
4176 phba->fc_stat.elsXmitACC++;
4177 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4178 lpfc_nlp_put(ndlp);
4179 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
4180 * it could be freed */
4181
4182 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4183 if (rc == IOCB_ERROR) {
4184 lpfc_els_free_iocb(phba, elsiocb);
4185 return 1;
4186 }
4187 return 0;
4188 }
4189
4190 /**
4191 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
4192 * @vport: pointer to a host virtual N_Port data structure.
4193 *
4194 * This routine issues Address Discover (ADISC) ELS commands to those
4195 * N_Ports which are in node port recovery state and ADISC has not been issued
4196 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4197 * lpfc_issue_els_adisc() routine, the per-@vport discovery count
4198 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
4199 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4200 * be marked with the FC_NLP_MORE bit and the process of issuing the remaining
4201 * ADISC IOCBs quits, to be picked up later. On the other hand, if after
4202 * walking through all the ndlps on the @vport no ADISC IOCB has been issued,
4203 * the FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating
4204 * there are no more ADISCs to be sent.
4205 *
4206 * Return code
4207 * The number of N_Ports with adisc issued.
4208 **/
4209 int
4210 lpfc_els_disc_adisc(struct lpfc_vport *vport)
4211 {
4212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4213 struct lpfc_nodelist *ndlp, *next_ndlp;
4214 int sentadisc = 0;
4215
4216 /* go thru NPR nodes and issue any remaining ELS ADISCs */
4217 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4218 if (!NLP_CHK_NODE_ACT(ndlp))
4219 continue;
4220 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4221 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4222 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
4223 spin_lock_irq(shost->host_lock);
4224 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4225 spin_unlock_irq(shost->host_lock);
4226 ndlp->nlp_prev_state = ndlp->nlp_state;
4227 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4228 lpfc_issue_els_adisc(vport, ndlp, 0);
4229 sentadisc++;
4230 vport->num_disc_nodes++;
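			/* Throttle discovery: stop once the configured number
			 * of concurrent ADISCs is outstanding.
			 */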
4231 if (vport->num_disc_nodes >=
4232 vport->cfg_discovery_threads) {
4233 spin_lock_irq(shost->host_lock);
4234 vport->fc_flag |= FC_NLP_MORE;
4235 spin_unlock_irq(shost->host_lock);
4236 break;
4237 }
4238 }
4239 }
4240 if (sentadisc == 0) {
4241 spin_lock_irq(shost->host_lock);
4242 vport->fc_flag &= ~FC_NLP_MORE;
4243 spin_unlock_irq(shost->host_lock);
4244 }
4245 return sentadisc;
4246 }
4247
4248 /**
4249 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
4250 * @vport: pointer to a host virtual N_Port data structure.
4251 *
4252 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4253 * which are in node port recovery state on a @vport. Each time an ELS
4254 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4255 * the per-@vport discovery count (num_disc_nodes) shall be
4256 * incremented. If num_disc_nodes reaches a pre-configured threshold
4257 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
4258 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs
4259 * quits, to be picked up later. On the other hand, if after walking
4260 * through all the ndlps on the @vport no PLOGI IOCB has been issued, the
4261 * FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating
4262 * there are no more PLOGIs to be sent.
4263 *
4264 * Return code
4265 * The number of N_Ports with plogi issued.
4266 **/
4267 int
4268 lpfc_els_disc_plogi(struct lpfc_vport *vport)
4269 {
4270 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4271 struct lpfc_nodelist *ndlp, *next_ndlp;
4272 int sentplogi = 0;
4273
4274 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4275 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4276 if (!NLP_CHK_NODE_ACT(ndlp))
4277 continue;
4278 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4279 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4280 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4281 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4282 ndlp->nlp_prev_state = ndlp->nlp_state;
4283 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4284 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4285 sentplogi++;
4286 vport->num_disc_nodes++;
4287 if (vport->num_disc_nodes >=
4288 vport->cfg_discovery_threads) {
4289 spin_lock_irq(shost->host_lock);
4290 vport->fc_flag |= FC_NLP_MORE;
4291 spin_unlock_irq(shost->host_lock);
4292 break;
4293 }
4294 }
4295 }
4296 if (sentplogi) {
4297 lpfc_set_disctmo(vport);
4298 }
4299 else {
4300 spin_lock_irq(shost->host_lock);
4301 vport->fc_flag &= ~FC_NLP_MORE;
4302 spin_unlock_irq(shost->host_lock);
4303 }
4304 return sentplogi;
4305 }
4306
4307 /**
4308 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
4309 * @vport: pointer to a host virtual N_Port data structure.
4310 *
4311 * This routine cleans up any Registration State Change Notification
4312 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
4313 * @vport together with the host_lock is used to prevent multiple threads
4314 * from trying to access the RSCN array on the same @vport at the same time.
4315 **/
4316 void
4317 lpfc_els_flush_rscn(struct lpfc_vport *vport)
4318 {
4319 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4320 struct lpfc_hba *phba = vport->phba;
4321 int i;
4322
4323 spin_lock_irq(shost->host_lock);
4324 if (vport->fc_rscn_flush) {
4325 /* Another thread is walking fc_rscn_id_list on this vport */
4326 spin_unlock_irq(shost->host_lock);
4327 return;
4328 }
4329 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
4330 vport->fc_rscn_flush = 1;
4331 spin_unlock_irq(shost->host_lock);
4332
4333 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4334 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
4335 vport->fc_rscn_id_list[i] = NULL;
4336 }
4337 spin_lock_irq(shost->host_lock);
4338 vport->fc_rscn_id_cnt = 0;
4339 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4340 spin_unlock_irq(shost->host_lock);
4341 lpfc_can_disctmo(vport);
4342 /* Indicate we are done walking this fc_rscn_id_list */
4343 vport->fc_rscn_flush = 0;
4344 }
4345
4346 /**
4347 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
4348 * @vport: pointer to a host virtual N_Port data structure.
4349 * @did: remote destination port identifier.
4350 *
4351 * This routine checks whether there is any pending Registration State
4352 * Change Notification (RSCN) to a @did on @vport.
4353 *
4354 * Return code
4355 * Non-zero - The @did matched a pending rscn
4356 * 0 - not able to match @did with a pending rscn
4357 **/
4358 int
4359 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
4360 {
4361 D_ID ns_did;
4362 D_ID rscn_did;
4363 uint32_t *lp;
4364 uint32_t payload_len, i;
4365 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4366
4367 ns_did.un.word = did;
4368
4369 /* Never match fabric nodes for RSCNs */
4370 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4371 return 0;
4372
4373 /* If we are doing a FULL RSCN rediscovery, match everything */
4374 if (vport->fc_flag & FC_RSCN_DISCOVERY)
4375 return did;
4376
4377 spin_lock_irq(shost->host_lock);
4378 if (vport->fc_rscn_flush) {
4379 /* Another thread is walking fc_rscn_id_list on this vport */
4380 spin_unlock_irq(shost->host_lock);
4381 return 0;
4382 }
4383 /* Indicate we are walking fc_rscn_id_list on this vport */
4384 vport->fc_rscn_flush = 1;
4385 spin_unlock_irq(shost->host_lock);
4386 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4387 lp = vport->fc_rscn_id_list[i]->virt;
4388 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4389 payload_len -= sizeof(uint32_t); /* take off word 0 */
4390 while (payload_len) {
4391 rscn_did.un.word = be32_to_cpu(*lp++);
4392 payload_len -= sizeof(uint32_t);
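			/* Match at port, area, or domain granularity per the
			 * RSCN address format field.
			 */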
4393 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4394 case RSCN_ADDRESS_FORMAT_PORT:
4395 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4396 && (ns_did.un.b.area == rscn_did.un.b.area)
4397 && (ns_did.un.b.id == rscn_did.un.b.id))
4398 goto return_did_out;
4399 break;
4400 case RSCN_ADDRESS_FORMAT_AREA:
4401 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4402 && (ns_did.un.b.area == rscn_did.un.b.area))
4403 goto return_did_out;
4404 break;
4405 case RSCN_ADDRESS_FORMAT_DOMAIN:
4406 if (ns_did.un.b.domain == rscn_did.un.b.domain)
4407 goto return_did_out;
4408 break;
4409 case RSCN_ADDRESS_FORMAT_FABRIC:
4410 goto return_did_out;
4411 }
4412 }
4413 }
4414 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4415 vport->fc_rscn_flush = 0;
4416 return 0;
4417 return_did_out:
4418 /* Indicate we are done with walking fc_rscn_id_list on this vport */
4419 vport->fc_rscn_flush = 0;
4420 return did;
4421 }
4422
4423 /**
4424 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
4425 * @vport: pointer to a host virtual N_Port data structure.
4426 *
4427 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4428 * state machine for a @vport's nodes that are with pending RSCN (Registration
4429 * State Change Notification).
4430 *
4431 * Return code
4432 * 0 - Successful (currently always returns 0)
4433 **/
4434 static int
4435 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4436 {
4437 struct lpfc_nodelist *ndlp = NULL;
4438
4439 /* Move all affected nodes by pending RSCNs to NPR state. */
4440 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4441 if (!NLP_CHK_NODE_ACT(ndlp) ||
4442 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4443 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4444 continue;
4445 lpfc_disc_state_machine(vport, ndlp, NULL,
4446 NLP_EVT_DEVICE_RECOVERY);
4447 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4448 }
4449 return 0;
4450 }
4451
4452 /**
4453 * lpfc_send_rscn_event - Send an RSCN event to management application
4454 * @vport: pointer to a host virtual N_Port data structure.
4455 * @cmdiocb: pointer to lpfc command iocb data structure.
4456 *
4457 * lpfc_send_rscn_event sends an RSCN netlink event to management
4458 * applications.
4459 */
4460 static void
4461 lpfc_send_rscn_event(struct lpfc_vport *vport,
4462 struct lpfc_iocbq *cmdiocb)
4463 {
4464 struct lpfc_dmabuf *pcmd;
4465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4466 uint32_t *payload_ptr;
4467 uint32_t payload_len;
4468 struct lpfc_rscn_event_header *rscn_event_data;
4469
4470 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4471 payload_ptr = (uint32_t *) pcmd->virt;
4472 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4473
4474 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4475 payload_len, GFP_KERNEL);
4476 if (!rscn_event_data) {
4477 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4478 "0147 Failed to allocate memory for RSCN event\n");
4479 return;
4480 }
4481 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4482 rscn_event_data->payload_length = payload_len;
4483 memcpy(rscn_event_data->rscn_payload, payload_ptr,
4484 payload_len);
4485
4486 fc_host_post_vendor_event(shost,
4487 fc_get_event_number(),
4488 sizeof(struct lpfc_els_event_header) + payload_len,
4489 (char *)rscn_event_data,
4490 LPFC_NL_VENDOR_ID);
4491
4492 kfree(rscn_event_data);
4493 }
4494
4495 /**
4496 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4497 * @vport: pointer to a host virtual N_Port data structure.
4498 * @cmdiocb: pointer to lpfc command iocb data structure.
4499 * @ndlp: pointer to a node-list data structure.
4500 *
4501 * This routine processes an unsolicited RSCN (Registration State Change
4502 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4503 * and the fc_host_post_event() routine is invoked to notify the FC transport
4504 * layer. If the discovery state machine is about to begin discovery, it just
4505 * accepts the RSCN and the discovery process will satisfy it. If this RSCN
4506 * only contains N_Port IDs for other vports on this HBA, it just accepts the
4507 * RSCN and ignores processing it. If the state machine is in the recovery
4508 * state, the fc_rscn_id_list of this @vport is walked and the
4509 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event for
4510 * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4511 * routine is invoked to handle the RSCN event.
4512 *
4513 * Return code
4514 * 0 - Just sent the acc response
4515 * 1 - Sent the acc response and waited for name server completion
4516 **/
4517 static int
4518 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4519 struct lpfc_nodelist *ndlp)
4520 {
4521 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4522 struct lpfc_hba *phba = vport->phba;
4523 struct lpfc_dmabuf *pcmd;
4524 uint32_t *lp, *datap;
4525 IOCB_t *icmd;
4526 uint32_t payload_len, length, nportid, *cmd;
4527 int rscn_cnt;
4528 int rscn_id = 0, hba_id = 0;
4529 int i;
4530
4531 icmd = &cmdiocb->iocb;
4532 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4533 lp = (uint32_t *) pcmd->virt;
4534
4535 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4536 payload_len -= sizeof(uint32_t); /* take off word 0 */
4537 /* RSCN received */
4538 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4539 "0214 RSCN received Data: x%x x%x x%x x%x\n",
4540 vport->fc_flag, payload_len, *lp,
4541 vport->fc_rscn_id_cnt);
4542
4543 /* Send an RSCN event to the management application */
4544 lpfc_send_rscn_event(vport, cmdiocb);
4545
4546 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4547 fc_host_post_event(shost, fc_get_event_number(),
4548 FCH_EVT_RSCN, lp[i]);
4549
4550 /* If we are about to begin discovery, just ACC the RSCN.
4551 * Discovery processing will satisfy it.
4552 */
4553 if (vport->port_state <= LPFC_NS_QRY) {
4554 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4555 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4556 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4557
4558 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4559 return 0;
4560 }
4561
4562 /* If this RSCN just contains NPortIDs for other vports on this HBA,
4563 * just ACC and ignore it.
4564 */
4565 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4566 !(vport->cfg_peer_port_login)) {
4567 i = payload_len;
4568 datap = lp;
4569 while (i > 0) {
4570 nportid = *datap++;
4571 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4572 i -= sizeof(uint32_t);
4573 rscn_id++;
4574 if (lpfc_find_vport_by_did(phba, nportid))
4575 hba_id++;
4576 }
4577 if (rscn_id == hba_id) {
4578 /* ALL NPortIDs in RSCN are on HBA */
4579 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4580 "0219 Ignore RSCN "
4581 "Data: x%x x%x x%x x%x\n",
4582 vport->fc_flag, payload_len,
4583 *lp, vport->fc_rscn_id_cnt);
4584 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4585 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4586 ndlp->nlp_DID, vport->port_state,
4587 ndlp->nlp_flag);
4588
4589 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4590 ndlp, NULL);
4591 return 0;
4592 }
4593 }
4594
4595 spin_lock_irq(shost->host_lock);
4596 if (vport->fc_rscn_flush) {
4597 /* Another thread is walking fc_rscn_id_list on this vport */
4598 vport->fc_flag |= FC_RSCN_DISCOVERY;
4599 spin_unlock_irq(shost->host_lock);
4600 /* Send back ACC */
4601 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4602 return 0;
4603 }
4604 /* Indicate we are walking fc_rscn_id_list on this vport */
4605 vport->fc_rscn_flush = 1;
4606 spin_unlock_irq(shost->host_lock);
4607 /* Get the array count after successfully have the token */
4608 rscn_cnt = vport->fc_rscn_id_cnt;
4609 /* If we are already processing an RSCN, save the received
4610 * RSCN payload buffer, cmdiocb->context2 to process later.
4611 */
4612 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4613 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4614 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4615 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4616
4617 spin_lock_irq(shost->host_lock);
4618 vport->fc_flag |= FC_RSCN_DEFERRED;
4619 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4620 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4621 vport->fc_flag |= FC_RSCN_MODE;
4622 spin_unlock_irq(shost->host_lock);
4623 if (rscn_cnt) {
4624 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4625 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4626 }
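			/* Coalesce this RSCN payload into the last saved
			 * buffer if it fits; otherwise save it as a new
			 * entry in fc_rscn_id_list.
			 */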
4627 if ((rscn_cnt) &&
4628 (payload_len + length <= LPFC_BPL_SIZE)) {
4629 *cmd &= ELS_CMD_MASK;
4630 *cmd |= cpu_to_be32(payload_len + length);
4631 memcpy(((uint8_t *)cmd) + length, lp,
4632 payload_len);
4633 } else {
4634 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4635 vport->fc_rscn_id_cnt++;
4636 			/* If we zero cmdiocb->context2, the calling
4637 * routine will not try to free it.
4638 */
4639 cmdiocb->context2 = NULL;
4640 }
4641 /* Deferred RSCN */
4642 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4643 "0235 Deferred RSCN "
4644 "Data: x%x x%x x%x\n",
4645 vport->fc_rscn_id_cnt, vport->fc_flag,
4646 vport->port_state);
4647 } else {
4648 vport->fc_flag |= FC_RSCN_DISCOVERY;
4649 spin_unlock_irq(shost->host_lock);
4650 /* ReDiscovery RSCN */
4651 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4652 "0234 ReDiscovery RSCN "
4653 "Data: x%x x%x x%x\n",
4654 vport->fc_rscn_id_cnt, vport->fc_flag,
4655 vport->port_state);
4656 }
4657 /* Indicate we are done walking fc_rscn_id_list on this vport */
4658 vport->fc_rscn_flush = 0;
4659 /* Send back ACC */
4660 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4661 /* send RECOVERY event for ALL nodes that match RSCN payload */
4662 lpfc_rscn_recovery_check(vport);
4663 spin_lock_irq(shost->host_lock);
4664 vport->fc_flag &= ~FC_RSCN_DEFERRED;
4665 spin_unlock_irq(shost->host_lock);
4666 return 0;
4667 }
4668 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4669 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4670 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4671
4672 spin_lock_irq(shost->host_lock);
4673 vport->fc_flag |= FC_RSCN_MODE;
4674 spin_unlock_irq(shost->host_lock);
4675 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4676 /* Indicate we are done walking fc_rscn_id_list on this vport */
4677 vport->fc_rscn_flush = 0;
4678 /*
4679 	 * If we zero cmdiocb->context2, the calling routine will
4680 * not try to free it.
4681 */
4682 cmdiocb->context2 = NULL;
4683 lpfc_set_disctmo(vport);
4684 /* Send back ACC */
4685 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4686 /* send RECOVERY event for ALL nodes that match RSCN payload */
4687 lpfc_rscn_recovery_check(vport);
4688 return lpfc_els_handle_rscn(vport);
4689 }
4690
4691 /**
4692 * lpfc_els_handle_rscn - Handle rscn for a vport
4693 * @vport: pointer to a host virtual N_Port data structure.
4694 *
4695 * This routine handles the Registration State Change Notification
4696 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4697 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4698 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4699 * NameServer shall be issued. If CT command to the NameServer fails to be
4700 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4701 * RSCN activities with the @vport.
4702 *
4703 * Return code
4704 * 0 - Cleaned up rscn on the @vport
4705 * 1 - Wait for plogi to name server before proceed
4706 **/
4707 int
4708 lpfc_els_handle_rscn(struct lpfc_vport *vport)
4709 {
4710 struct lpfc_nodelist *ndlp;
4711 struct lpfc_hba *phba = vport->phba;
4712
4713 /* Ignore RSCN if the port is being torn down. */
4714 if (vport->load_flag & FC_UNLOADING) {
4715 lpfc_els_flush_rscn(vport);
4716 return 0;
4717 }
4718
4719 /* Start timer for RSCN processing */
4720 lpfc_set_disctmo(vport);
4721
4722 /* RSCN processed */
4723 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4724 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4725 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4726 vport->port_state);
4727
4728 /* To process RSCN, first compare RSCN data with NameServer */
4729 vport->fc_ns_retry = 0;
4730 vport->num_disc_nodes = 0;
4731
4732 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4733 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4734 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
4735 /* Good ndlp, issue CT Request to NameServer */
4736 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
4737 /* Wait for NameServer query cmpl before we can
4738 continue */
4739 return 1;
4740 } else {
4741 /* If login to NameServer does not exist, issue one */
4742 /* Good status, issue PLOGI to NameServer */
4743 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4744 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4745 /* Wait for NameServer login cmpl before we can
4746 continue */
4747 return 1;
4748
4749 if (ndlp) {
4750 ndlp = lpfc_enable_node(vport, ndlp,
4751 NLP_STE_PLOGI_ISSUE);
4752 if (!ndlp) {
4753 lpfc_els_flush_rscn(vport);
4754 return 0;
4755 }
4756 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
4757 } else {
4758 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4759 if (!ndlp) {
4760 lpfc_els_flush_rscn(vport);
4761 return 0;
4762 }
4763 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4764 ndlp->nlp_prev_state = ndlp->nlp_state;
4765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4766 }
4767 ndlp->nlp_type |= NLP_FABRIC;
4768 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4769 /* Wait for NameServer login cmpl before we can
4770 * continue
4771 */
4772 return 1;
4773 }
4774
4775 lpfc_els_flush_rscn(vport);
4776 return 0;
4777 }
4778
4779 /**
4780 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
4781 * @vport: pointer to a host virtual N_Port data structure.
4782 * @cmdiocb: pointer to lpfc command iocb data structure.
4783 * @ndlp: pointer to a node-list data structure.
4784 *
4785 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4786 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4787 * point topology. As an unsolicited FLOGI should not be received in a loop
4788 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4789 * lpfc_check_sparm() routine is invoked to check the parameters in the
4790 * unsolicited FLOGI. If parameters validation failed, the routine
4791 * lpfc_els_rsp_reject() shall be called with reject reason code set to
4792 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
4793 * FLOGI shall be compared with the Port WWN of the @vport to determine who
4794 * will initiate PLOGI. The party with the higher lexicographical value has
4795 * higher priority (as the winning port) and will initiate PLOGI and
4796 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4797 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4798 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4799 *
4800 * Return code
4801 * 0 - Successfully processed the unsolicited flogi
4802 * 1 - Failed to process the unsolicited flogi
4803 **/
4804 static int
4805 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4806 struct lpfc_nodelist *ndlp)
4807 {
4808 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4809 struct lpfc_hba *phba = vport->phba;
4810 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4811 uint32_t *lp = (uint32_t *) pcmd->virt;
4812 IOCB_t *icmd = &cmdiocb->iocb;
4813 struct serv_parm *sp;
4814 LPFC_MBOXQ_t *mbox;
4815 struct ls_rjt stat;
4816 uint32_t cmd, did;
4817 int rc;
4818
4819 cmd = *lp++;
4820 sp = (struct serv_parm *) lp;
4821
4822 /* FLOGI received */
4823
4824 lpfc_set_disctmo(vport);
4825
4826 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4827 /* We should never receive a FLOGI in loop mode, ignore it */
4828 did = icmd->un.elsreq64.remoteID;
4829
4830 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
4831 Loop Mode */
4832 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4833 "0113 An FLOGI ELS command x%x was "
4834 "received from DID x%x in Loop Mode\n",
4835 cmd, did);
4836 return 1;
4837 }
4838
4839 did = Fabric_DID;
4840
4841 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4842 /* For a FLOGI we accept, if our portname is greater
4843 * than the remote portname we initiate Nport login.
4844 */
4845
4846 rc = memcmp(&vport->fc_portname, &sp->portName,
4847 sizeof(struct lpfc_name));
4848
4849 if (!rc) {
4850 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4851 if (!mbox)
4852 return 1;
4853
4854 lpfc_linkdown(phba);
4855 lpfc_init_link(phba, mbox,
4856 phba->cfg_topology,
4857 phba->cfg_link_speed);
4858 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4859 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4860 mbox->vport = vport;
4861 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4862 lpfc_set_loopback_flag(phba);
4863 if (rc == MBX_NOT_FINISHED) {
4864 mempool_free(mbox, phba->mbox_mem_pool);
4865 }
4866 return 1;
4867 } else if (rc > 0) { /* greater than */
4868 spin_lock_irq(shost->host_lock);
4869 vport->fc_flag |= FC_PT2PT_PLOGI;
4870 spin_unlock_irq(shost->host_lock);
4871 }
4872 spin_lock_irq(shost->host_lock);
4873 vport->fc_flag |= FC_PT2PT;
4874 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4875 spin_unlock_irq(shost->host_lock);
4876 } else {
4877 /* Reject this request because of invalid parameters */
4878 stat.un.b.lsRjtRsvd0 = 0;
4879 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4880 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4881 stat.un.b.vendorUnique = 0;
4882 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4883 NULL);
4884 return 1;
4885 }
4886
4887 /* Send back ACC */
4888 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
4889
4890 return 0;
4891 }
4892
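/*
 * Illustrative sketch (not part of the driver): how the point-to-point
 * FLOGI arbitration in lpfc_els_rcv_flogi() above resolves.  The decision
 * is a byte-wise memcmp() of the two 8-byte Port WWNs, so the
 * lexicographically larger WWPN wins and initiates PLOGI.  The WWPN values
 * below are made up for the example.
 *
 *	uint8_t local[8]  = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x02 };
 *	uint8_t remote[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 *	int rc = memcmp(local, remote, 8);
 *
 *	rc > 0  : local WWPN is larger - FC_PT2PT_PLOGI is set and this
 *	          port will send the PLOGI.
 *	rc < 0  : remote WWPN is larger - this port waits for the remote
 *	          port's PLOGI.
 *	rc == 0 : identical WWPNs - the link is brought down and
 *	          re-initialized via lpfc_init_link().
 */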
4893 /**
4894 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
4895 * @vport: pointer to a host virtual N_Port data structure.
4896 * @cmdiocb: pointer to lpfc command iocb data structure.
4897 * @ndlp: pointer to a node-list data structure.
4898 *
4899 * This routine processes Request Node Identification Data (RNID) IOCB
4900 * received as an ELS unsolicited event. Only when the RNID specifies format
4901 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) will
4902 * this routine invoke the lpfc_els_rsp_rnid_acc() routine to
4903 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4904 * rejected by invoking the lpfc_els_rsp_reject() routine.
4905 *
4906 * Return code
4907 * 0 - Successfully processed rnid iocb (currently always return 0)
4908 **/
4909 static int
4910 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4911 struct lpfc_nodelist *ndlp)
4912 {
4913 struct lpfc_dmabuf *pcmd;
4914 uint32_t *lp;
4915 IOCB_t *icmd;
4916 RNID *rn;
4917 struct ls_rjt stat;
4918 uint32_t cmd, did;
4919
4920 icmd = &cmdiocb->iocb;
4921 did = icmd->un.elsreq64.remoteID;
4922 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4923 lp = (uint32_t *) pcmd->virt;
4924
4925 cmd = *lp++;
4926 rn = (RNID *) lp;
4927
4928 /* RNID received */
4929
4930 switch (rn->Format) {
4931 case 0:
4932 case RNID_TOPOLOGY_DISC:
4933 /* Send back ACC */
4934 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
4935 break;
4936 default:
4937 /* Reject this request because format not supported */
4938 stat.un.b.lsRjtRsvd0 = 0;
4939 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4940 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4941 stat.un.b.vendorUnique = 0;
4942 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4943 NULL);
4944 }
4945 return 0;
4946 }
4947
4948 /**
4949 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
4950 * @vport: pointer to a host virtual N_Port data structure.
4951 * @cmdiocb: pointer to lpfc command iocb data structure.
4952 * @ndlp: pointer to a node-list data structure.
4953 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited event
 * and sends back the received echo data in an Accept (ACC) response.
 *
4954 * Return code
4955 * 0 - Successfully processed echo iocb (currently always return 0)
4956 **/
4957 static int
4958 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4959 struct lpfc_nodelist *ndlp)
4960 {
4961 uint8_t *pcmd;
4962
4963 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4964
4965 /* skip over first word of echo command to find echo data */
4966 pcmd += sizeof(uint32_t);
4967
4968 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
4969 return 0;
4970 }
4971
4972 /**
4973 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
4974 * @vport: pointer to a host virtual N_Port data structure.
4975 * @cmdiocb: pointer to lpfc command iocb data structure.
4976 * @ndlp: pointer to a node-list data structure.
4977 *
4978 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4979 * received as an ELS unsolicited event. Currently, this function just invokes
4980 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4981 *
4982 * Return code
4983 * 0 - Successfully processed lirr iocb (currently always return 0)
4984 **/
4985 static int
4986 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4987 struct lpfc_nodelist *ndlp)
4988 {
4989 struct ls_rjt stat;
4990
4991 /* For now, unconditionally reject this command */
4992 stat.un.b.lsRjtRsvd0 = 0;
4993 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4994 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4995 stat.un.b.vendorUnique = 0;
4996 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4997 return 0;
4998 }
4999
5000 /**
5001 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
5002 * @vport: pointer to a host virtual N_Port data structure.
5003 * @cmdiocb: pointer to lpfc command iocb data structure.
5004 * @ndlp: pointer to a node-list data structure.
5005 *
5006 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
5007 * received as an ELS unsolicited event. A request to RRQ shall only
5008 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
5009 * Nx_Port N_Port_ID of the target Exchange is the same as the
5010 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
5011 * not accepted, an LS_RJT with reason code "Unable to perform
5012 * command request" and reason code explanation "Invalid Originator
5013 * S_ID" shall be returned. For now, we just unconditionally accept
5014 * RRQ from the target.
5015 **/
5016 static void
5017 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5018 struct lpfc_nodelist *ndlp)
5019 {
5020 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5021 if (vport->phba->sli_rev == LPFC_SLI_REV4)
5022 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5023 }
5024
5025 /**
5026 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5027 * @phba: pointer to lpfc hba data structure.
5028 * @pmb: pointer to the driver internal queue element for mailbox command.
5029 *
5030 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5031 * mailbox command. This callback function is to actually send the Accept
5032 * (ACC) response to a Read Link Error Status Block (RLS) unsolicited IOCB
5033 * event. It collects the link statistics from the completion of the
5034 * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
5035 * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
5036 * routine to send the ACC response to the RLS.
5037 *
5038 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5039 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5040 * will be stored into the context1 field of the IOCB for the completion
5041 * callback function to the RLS Accept Response ELS IOCB command.
5042 *
5043 **/
5044 static void
5045 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5046 {
5047 MAILBOX_t *mb;
5048 IOCB_t *icmd;
5049 struct RLS_RSP *rls_rsp;
5050 uint8_t *pcmd;
5051 struct lpfc_iocbq *elsiocb;
5052 struct lpfc_nodelist *ndlp;
5053 uint16_t xri;
5054 uint32_t cmdsize;
5055
5056 mb = &pmb->u.mb;
5057
5058 ndlp = (struct lpfc_nodelist *) pmb->context2;
5059 xri = (uint16_t) ((unsigned long)(pmb->context1));
5060 pmb->context1 = NULL;
5061 pmb->context2 = NULL;
5062
5063 if (mb->mbxStatus) {
5064 mempool_free(pmb, phba->mbox_mem_pool);
5065 return;
5066 }
5067
5068 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5069 mempool_free(pmb, phba->mbox_mem_pool);
5070 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5071 lpfc_max_els_tries, ndlp,
5072 ndlp->nlp_DID, ELS_CMD_ACC);
5073
5074 /* Decrement the ndlp reference count from previous mbox command */
5075 lpfc_nlp_put(ndlp);
5076
5077 if (!elsiocb)
5078 return;
5079
5080 icmd = &elsiocb->iocb;
5081 icmd->ulpContext = xri;
5082
5083 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5084 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5085 pcmd += sizeof(uint32_t); /* Skip past command */
5086 rls_rsp = (struct RLS_RSP *)pcmd;
5087
5088 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5089 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5090 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5091 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5092 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5093 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5094
5095 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5096 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5097 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
5098 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5099 elsiocb->iotag, elsiocb->iocb.ulpContext,
5100 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5101 ndlp->nlp_rpi);
5102 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5103 phba->fc_stat.elsXmitACC++;
5104 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5105 lpfc_els_free_iocb(phba, elsiocb);
5106 }
5107
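/*
 * Illustrative layout sketch (not part of the driver), assuming struct
 * RLS_RSP carries its counters in the order they are filled in above:
 * the RLS ACC payload is one ELS_CMD_ACC word followed by the Link Error
 * Status Block counters, each converted to big-endian wire order with
 * cpu_to_be32():
 *
 *	word 0 : ELS_CMD_ACC
 *	word 1 : linkFailureCnt
 *	word 2 : lossSyncCnt
 *	word 3 : lossSignalCnt
 *	word 4 : primSeqErrCnt
 *	word 5 : invalidXmitWord
 *	word 6 : crcCnt
 *
 * which is why cmdsize above is sizeof(struct RLS_RSP) + sizeof(uint32_t).
 */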
5108 /**
5109 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5110 * @phba: pointer to lpfc hba data structure.
5111 * @pmb: pointer to the driver internal queue element for mailbox command.
5112 *
5113 * This routine is the completion callback function for the MBX_READ_LNK_STAT
5114 * mailbox command. This callback function is to actually send the Accept
5115 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
5116 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5117 * mailbox command, constructs the RPS response with the link statistics
5118 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
5119 * response to the RPS.
5120 *
5121 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5122 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5123 * will be stored into the context1 field of the IOCB for the completion
5124 * callback function to the RPS Accept Response ELS IOCB command.
5125 *
5126 **/
5127 static void
5128 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5129 {
5130 MAILBOX_t *mb;
5131 IOCB_t *icmd;
5132 RPS_RSP *rps_rsp;
5133 uint8_t *pcmd;
5134 struct lpfc_iocbq *elsiocb;
5135 struct lpfc_nodelist *ndlp;
5136 uint16_t xri, status;
5137 uint32_t cmdsize;
5138
5139 mb = &pmb->u.mb;
5140
5141 ndlp = (struct lpfc_nodelist *) pmb->context2;
5142 xri = (uint16_t) ((unsigned long)(pmb->context1));
5143 pmb->context1 = NULL;
5144 pmb->context2 = NULL;
5145
5146 if (mb->mbxStatus) {
5147 mempool_free(pmb, phba->mbox_mem_pool);
5148 return;
5149 }
5150
5151 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
5152 mempool_free(pmb, phba->mbox_mem_pool);
5153 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5154 lpfc_max_els_tries, ndlp,
5155 ndlp->nlp_DID, ELS_CMD_ACC);
5156
5157 /* Decrement the ndlp reference count from previous mbox command */
5158 lpfc_nlp_put(ndlp);
5159
5160 if (!elsiocb)
5161 return;
5162
5163 icmd = &elsiocb->iocb;
5164 icmd->ulpContext = xri;
5165
5166 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5167 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5168 pcmd += sizeof(uint32_t); /* Skip past command */
5169 rps_rsp = (RPS_RSP *)pcmd;
5170
5171 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5172 status = 0x10;
5173 else
5174 status = 0x8;
5175 if (phba->pport->fc_flag & FC_FABRIC)
5176 status |= 0x4;
5177
5178 rps_rsp->rsvd1 = 0;
5179 rps_rsp->portStatus = cpu_to_be16(status);
5180 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5181 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5182 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5183 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5184 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5185 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5186 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
5187 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5188 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5189 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5190 elsiocb->iotag, elsiocb->iocb.ulpContext,
5191 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5192 ndlp->nlp_rpi);
5193 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5194 phba->fc_stat.elsXmitACC++;
5195 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5196 lpfc_els_free_iocb(phba, elsiocb);
5197 return;
5198 }
5199
5200 /**
5201 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
5202 * @vport: pointer to a host virtual N_Port data structure.
5203 * @cmdiocb: pointer to lpfc command iocb data structure.
5204 * @ndlp: pointer to a node-list data structure.
5205 *
5206 * This routine processes a Read Link Error Status Block (RLS) IOCB received
5207 * as an ELS unsolicited event. It first checks the remote port state. If the
5208 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5209 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5210 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5211 * for reading the HBA link statistics. The callback function,
5212 * lpfc_els_rsp_rls_acc(), set for the MBX_READ_LNK_STAT mailbox command,
5213 * then actually sends out the RLS Accept (ACC) response.
5214 *
5215 * Return codes
5216 * 0 - Successfully processed rls iocb (currently always return 0)
5217 **/
5218 static int
5219 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5220 struct lpfc_nodelist *ndlp)
5221 {
5222 struct lpfc_hba *phba = vport->phba;
5223 LPFC_MBOXQ_t *mbox;
5224 struct lpfc_dmabuf *pcmd;
5225 struct ls_rjt stat;
5226
5227 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5228 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5229 /* reject the unsolicited RLS request and done with it */
5230 goto reject_out;
5231
5232 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5233
5234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5235 if (mbox) {
5236 lpfc_read_lnk_stat(phba, mbox);
5237 mbox->context1 =
5238 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
5239 mbox->context2 = lpfc_nlp_get(ndlp);
5240 mbox->vport = vport;
5241 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5242 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5243 != MBX_NOT_FINISHED)
5244 /* Mbox completion will send ELS Response */
5245 return 0;
5246 /* Decrement reference count used for the failed mbox
5247 * command.
5248 */
5249 lpfc_nlp_put(ndlp);
5250 mempool_free(mbox, phba->mbox_mem_pool);
5251 }
5252 reject_out:
5253 /* issue rejection response */
5254 stat.un.b.lsRjtRsvd0 = 0;
5255 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5256 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5257 stat.un.b.vendorUnique = 0;
5258 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5259 return 0;
5260 }
5261
5262 /**
5263 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5264 * @vport: pointer to a host virtual N_Port data structure.
5265 * @cmdiocb: pointer to lpfc command iocb data structure.
5266 * @ndlp: pointer to a node-list data structure.
5267 *
5268 * This routine processes Read Timeout Value (RTV) IOCB received as an
5269 * ELS unsolicited event. It first checks the remote port state. If the
5270 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5271 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5272 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
5273 * Value (RTV) unsolicited IOCB event.
5274 *
5275 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5276 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5277 * will be stored into the context1 field of the IOCB for the completion
5278 * callback function to the RTV Accept Response ELS IOCB command.
5279 *
5280 * Return codes
5281 * 0 - Successfully processed rtv iocb (currently always return 0)
5282 **/
5283 static int
5284 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5285 struct lpfc_nodelist *ndlp)
5286 {
5287 struct lpfc_hba *phba = vport->phba;
5288 struct ls_rjt stat;
5289 struct RTV_RSP *rtv_rsp;
5290 uint8_t *pcmd;
5291 struct lpfc_iocbq *elsiocb;
5292 uint32_t cmdsize;
5293
5294
5295 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5296 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5297 /* reject the unsolicited RTV request and done with it */
5298 goto reject_out;
5299
5300 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5301 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5302 lpfc_max_els_tries, ndlp,
5303 ndlp->nlp_DID, ELS_CMD_ACC);
5304
5305 if (!elsiocb)
5306 return 1;
5307
5308 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5309 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5310 pcmd += sizeof(uint32_t); /* Skip past command */
5311
5312 /* use the command's xri in the response */
5313 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
5314
5315 rtv_rsp = (struct RTV_RSP *)pcmd;
5316
5317 /* populate RTV payload */
5318 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5319 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5320 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5321 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5322 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5323
5324 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
5325 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5326 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5327 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5328 "Data: x%x x%x x%x\n",
5329 elsiocb->iotag, elsiocb->iocb.ulpContext,
5330 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5331 ndlp->nlp_rpi,
5332 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5333 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5334 phba->fc_stat.elsXmitACC++;
5335 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5336 lpfc_els_free_iocb(phba, elsiocb);
5337 return 0;
5338
5339 reject_out:
5340 /* issue rejection response */
5341 stat.un.b.lsRjtRsvd0 = 0;
5342 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5343 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5344 stat.un.b.vendorUnique = 0;
5345 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5346 return 0;
5347 }
5348
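/*
 * Illustrative sketch (not part of the driver) of the RTV ACC payload
 * built above, assuming struct RTV_RSP is laid out in the order the
 * fields are filled in:
 *
 *	word 0 : ELS_CMD_ACC
 *	word 1 : R_A_TOV reported in milliseconds (the "* 1000" suggests
 *	         phba->fc_ratov is kept in seconds)
 *	word 2 : E_D_TOV in milliseconds
 *	word 3 : qtov word, with the E_D_TOV-resolution bit set when the
 *	         HBA reports fc_edtovResol
 */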
5349 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
5350 * @vport: pointer to a host virtual N_Port data structure.
5351 * @cmdiocb: pointer to lpfc command iocb data structure.
5352 * @ndlp: pointer to a node-list data structure.
5353 *
5354 * This routine processes Read Port Status (RPS) IOCB received as an
5355 * ELS unsolicited event. It first checks the remote port state. If the
5356 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5357 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5358 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5359 * for reading the HBA link statistics. The callback function,
5360 * lpfc_els_rsp_rps_acc(), set for the MBX_READ_LNK_STAT mailbox command,
5361 * then actually sends out the RPS Accept (ACC) response.
5362 *
5363 * Return codes
5364 * 0 - Successfully processed rps iocb (currently always return 0)
5365 **/
5366 static int
5367 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5368 struct lpfc_nodelist *ndlp)
5369 {
5370 struct lpfc_hba *phba = vport->phba;
5371 uint32_t *lp;
5372 uint8_t flag;
5373 LPFC_MBOXQ_t *mbox;
5374 struct lpfc_dmabuf *pcmd;
5375 RPS *rps;
5376 struct ls_rjt stat;
5377
5378 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5379 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5380 /* reject the unsolicited RPS request and done with it */
5381 goto reject_out;
5382
5383 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5384 lp = (uint32_t *) pcmd->virt;
5385 flag = (be32_to_cpu(*lp++) & 0xf);
5386 rps = (RPS *) lp;
5387
5388 if ((flag == 0) ||
5389 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
5390 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
5391 sizeof(struct lpfc_name)) == 0))) {
5392
5393 printk("Fix me....\n");
5394 dump_stack();
5395 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5396 if (mbox) {
5397 lpfc_read_lnk_stat(phba, mbox);
5398 mbox->context1 =
5399 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
5400 mbox->context2 = lpfc_nlp_get(ndlp);
5401 mbox->vport = vport;
5402 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
5403 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5404 != MBX_NOT_FINISHED)
5405 /* Mbox completion will send ELS Response */
5406 return 0;
5407 /* Decrement reference count used for the failed mbox
5408 * command.
5409 */
5410 lpfc_nlp_put(ndlp);
5411 mempool_free(mbox, phba->mbox_mem_pool);
5412 }
5413 }
5414
5415 reject_out:
5416 /* issue rejection response */
5417 stat.un.b.lsRjtRsvd0 = 0;
5418 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5419 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5420 stat.un.b.vendorUnique = 0;
5421 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5422 return 0;
5423 }
5424
5425 /* lpfc_issue_els_rrq - Issue an els rrq (reinstate recovery qualifier) iocb
5426 * @vport: pointer to a host virtual N_Port data structure.
5427 * @ndlp: pointer to a node-list data structure.
5428 * @did: DID of the target.
5429 * @rrq: Pointer to the rrq struct.
5430 *
5431 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
5432 * successful, the completion handler will clear the RRQ.
5433 *
5434 * Return codes
5435 * 0 - Successfully sent rrq els iocb.
5436 * 1 - Failed to send rrq els iocb.
5437 **/
5438 static int
5439 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5440 uint32_t did, struct lpfc_node_rrq *rrq)
5441 {
5442 struct lpfc_hba *phba = vport->phba;
5443 struct RRQ *els_rrq;
5444 IOCB_t *icmd;
5445 struct lpfc_iocbq *elsiocb;
5446 uint8_t *pcmd;
5447 uint16_t cmdsize;
5448 int ret;
5449
5450
5451 if (ndlp != rrq->ndlp)
5452 ndlp = rrq->ndlp;
5453 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5454 return 1;
5455
5456 /* If ndlp is not NULL, we will bump the reference count on it */
5457 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5458 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5459 ELS_CMD_RRQ);
5460 if (!elsiocb)
5461 return 1;
5462
5463 icmd = &elsiocb->iocb;
5464 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5465
5466 /* For RRQ request, remainder of payload is Exchange IDs */
5467 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5468 pcmd += sizeof(uint32_t);
5469 els_rrq = (struct RRQ *) pcmd;
5470
5471 bf_set(rrq_oxid, els_rrq, rrq->xritag);
5472 bf_set(rrq_rxid, els_rrq, rrq->rxid);
5473 bf_set(rrq_did, els_rrq, vport->fc_myDID);
5474 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5475 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5476
5477
5478 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5479 "Issue RRQ: did:x%x",
5480 did, rrq->xritag, rrq->rxid);
5481 elsiocb->context_un.rrq = rrq;
5482 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5483 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5484
5485 if (ret == IOCB_ERROR) {
5486 lpfc_els_free_iocb(phba, elsiocb);
5487 return 1;
5488 }
5489 return 0;
5490 }
5491
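/*
 * Illustrative sketch (not part of the driver): per FC-LS, the RRQ
 * payload built above carries the S_ID of the exchange originator and
 * the OX_ID/RX_ID of the exchange whose recovery qualifier is to be
 * reinstated, roughly:
 *
 *	word 0 : ELS_CMD_RRQ
 *	word 1 : originator S_ID (here vport->fc_myDID)
 *	word 2 : OX_ID (rrq->xritag) in the upper 16 bits,
 *	         RX_ID (rrq->rxid) in the lower 16 bits
 *
 * The driver packs these via bf_set() and byte-swaps the words to wire
 * order with cpu_to_be32() before issuing the IOCB.
 */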
5492 /**
5493 * lpfc_send_rrq - Sends ELS RRQ if needed.
5494 * @phba: pointer to lpfc hba data structure.
5495 * @rrq: pointer to the active rrq.
5496 *
5497 * This routine will call the lpfc_issue_els_rrq if the rrq is
5498 * still active for the xri. If this function returns a failure then
5499 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
5500 *
5501 * Returns 0 Success.
5502 * 1 Failure.
5503 **/
5504 int
5505 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5506 {
5507 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5508 rrq->nlp_DID);
5509 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5510 return lpfc_issue_els_rrq(rrq->vport, ndlp,
5511 rrq->nlp_DID, rrq);
5512 else
5513 return 1;
5514 }
5515
5516 /**
5517 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
5518 * @vport: pointer to a host virtual N_Port data structure.
5519 * @cmdsize: size of the ELS command.
5520 * @oldiocb: pointer to the original lpfc command iocb data structure.
5521 * @ndlp: pointer to a node-list data structure.
5522 *
5523 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
5524 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
5525 *
5526 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5527 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5528 * will be stored into the context1 field of the IOCB for the completion
5529 * callback function to the RPL Accept Response ELS command.
5530 *
5531 * Return code
5532 * 0 - Successfully issued ACC RPL ELS command
5533 * 1 - Failed to issue ACC RPL ELS command
5534 **/
5535 static int
5536 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5537 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5538 {
5539 struct lpfc_hba *phba = vport->phba;
5540 IOCB_t *icmd, *oldcmd;
5541 RPL_RSP rpl_rsp;
5542 struct lpfc_iocbq *elsiocb;
5543 uint8_t *pcmd;
5544
5545 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5546 ndlp->nlp_DID, ELS_CMD_ACC);
5547
5548 if (!elsiocb)
5549 return 1;
5550
5551 icmd = &elsiocb->iocb;
5552 oldcmd = &oldiocb->iocb;
5553 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
5554
5555 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5556 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5557 pcmd += sizeof(uint16_t);
5558 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
5559 pcmd += sizeof(uint16_t);
5560
5561 /* Setup the RPL ACC payload */
5562 rpl_rsp.listLen = be32_to_cpu(1);
5563 rpl_rsp.index = 0;
5564 rpl_rsp.port_num_blk.portNum = 0;
5565 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
5566 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
5567 sizeof(struct lpfc_name));
5568 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
5569 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
5570 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5571 "0120 Xmit ELS RPL ACC response tag x%x "
5572 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5573 "rpi x%x\n",
5574 elsiocb->iotag, elsiocb->iocb.ulpContext,
5575 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5576 ndlp->nlp_rpi);
5577 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5578 phba->fc_stat.elsXmitACC++;
5579 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
5580 IOCB_ERROR) {
5581 lpfc_els_free_iocb(phba, elsiocb);
5582 return 1;
5583 }
5584 return 0;
5585 }
5586
5587 /**
5588 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
5589 * @vport: pointer to a host virtual N_Port data structure.
5590 * @cmdiocb: pointer to lpfc command iocb data structure.
5591 * @ndlp: pointer to a node-list data structure.
5592 *
5593 * This routine processes Read Port List (RPL) IOCB received as an ELS
5594 * unsolicited event. It first checks the remote port state. If the remote
5595 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
5596 * invokes the lpfc_els_rsp_reject() routine to send reject response.
5597 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
5598 * to accept the RPL.
5599 *
5600 * Return code
5601 * 0 - Successfully processed rpl iocb (currently always return 0)
5602 **/
5603 static int
5604 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5605 struct lpfc_nodelist *ndlp)
5606 {
5607 struct lpfc_dmabuf *pcmd;
5608 uint32_t *lp;
5609 uint32_t maxsize;
5610 uint16_t cmdsize;
5611 RPL *rpl;
5612 struct ls_rjt stat;
5613
5614 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5615 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
5616 /* issue rejection response */
5617 stat.un.b.lsRjtRsvd0 = 0;
5618 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5619 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5620 stat.un.b.vendorUnique = 0;
5621 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5622 NULL);
5623 /* rejected the unsolicited RPL request and done with it */
5624 return 0;
5625 }
5626
5627 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5628 lp = (uint32_t *) pcmd->virt;
5629 rpl = (RPL *) (lp + 1);
5630 maxsize = be32_to_cpu(rpl->maxsize);
5631
5632 /* We support only one port */
5633 if ((rpl->index == 0) &&
5634 ((maxsize == 0) ||
5635 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
5636 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
5637 } else {
5638 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
5639 }
5640 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
5641
5642 return 0;
5643 }
5644
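/*
 * Worked example (illustrative, not part of the driver) for the ACC
 * sizing above: a requester asking for port index 0 with maxsize 0
 * ("no limit"), or with a maxsize large enough to hold a full RPL_RSP,
 * gets cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); a requester that
 * advertises, say, maxsize = 2 words gets the response truncated to
 * cmdsize = sizeof(uint32_t) + 2 * sizeof(uint32_t) = 12 bytes.
 */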
5645 /**
5646 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
5647 * @vport: pointer to a virtual N_Port data structure.
5648 * @cmdiocb: pointer to lpfc command iocb data structure.
5649 * @ndlp: pointer to a node-list data structure.
5650 *
5651 * This routine processes Fibre Channel Address Resolution Protocol
5652 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
5653 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
5654 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
5655 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
5656 * remote PortName is compared against the FC PortName stored in the @vport
5657 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
5658 * compared against the FC NodeName stored in the @vport data structure.
5659 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
5660 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
5661 * invoked to send out FARP Response to the remote node. Before sending the
5662 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
5663 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
5664 * routine is invoked to log into the remote port first.
5665 *
5666 * Return code
5667 * 0 - Either the FARP Match Mode is not supported or the FARP was processed
5668 **/
5669 static int
5670 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5671 struct lpfc_nodelist *ndlp)
5672 {
5673 struct lpfc_dmabuf *pcmd;
5674 uint32_t *lp;
5675 IOCB_t *icmd;
5676 FARP *fp;
5677 uint32_t cmd, cnt, did;
5678
5679 icmd = &cmdiocb->iocb;
5680 did = icmd->un.elsreq64.remoteID;
5681 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5682 lp = (uint32_t *) pcmd->virt;
5683
5684 cmd = *lp++;
5685 fp = (FARP *) lp;
5686 /* FARP-REQ received from DID <did> */
5687 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5688 "0601 FARP-REQ received from DID x%x\n", did);
5689 /* We will only support match on WWPN or WWNN */
5690 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
5691 return 0;
5692 }
5693
5694 cnt = 0;
5695 /* If this FARP command is searching for my portname */
5696 if (fp->Mflags & FARP_MATCH_PORT) {
5697 if (memcmp(&fp->RportName, &vport->fc_portname,
5698 sizeof(struct lpfc_name)) == 0)
5699 cnt = 1;
5700 }
5701
5702 /* If this FARP command is searching for my nodename */
5703 if (fp->Mflags & FARP_MATCH_NODE) {
5704 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
5705 sizeof(struct lpfc_name)) == 0)
5706 cnt = 1;
5707 }
5708
5709 if (cnt) {
5710 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
5711 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
5712 /* Log back into the node before sending the FARP. */
5713 if (fp->Rflags & FARP_REQUEST_PLOGI) {
5714 ndlp->nlp_prev_state = ndlp->nlp_state;
5715 lpfc_nlp_set_state(vport, ndlp,
5716 NLP_STE_PLOGI_ISSUE);
5717 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5718 }
5719
5720 /* Send a FARP response to that node */
5721 if (fp->Rflags & FARP_REQUEST_FARPR)
5722 lpfc_issue_els_farpr(vport, did, 0);
5723 }
5724 }
5725 return 0;
5726 }
5727
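/*
 * Illustrative summary (not part of the driver) of the FARP handling
 * above, expressed as the flag checks actually performed:
 *
 *	fp->Mflags & FARP_MATCH_PORT : match fp->RportName against
 *	                               vport->fc_portname
 *	fp->Mflags & FARP_MATCH_NODE : match fp->RnodeName against
 *	                               vport->fc_nodename
 *
 * On a match, and only while the node is unmapped or mapped:
 *
 *	fp->Rflags & FARP_REQUEST_PLOGI : PLOGI to the requester first
 *	fp->Rflags & FARP_REQUEST_FARPR : then send a FARP response via
 *	                                  lpfc_issue_els_farpr()
 */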
5728 /**
5729 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
5730 * @vport: pointer to a host virtual N_Port data structure.
5731 * @cmdiocb: pointer to lpfc command iocb data structure.
5732 * @ndlp: pointer to a node-list data structure.
5733 *
5734 * This routine processes Fibre Channel Address Resolution Protocol
5735 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5736 * invokes the lpfc_els_rsp_acc() routine to send an Accept (ACC) to the
5737 * remote node for the FARP response request.
5738 *
5739 * Return code
5740 * 0 - Successfully processed FARPR IOCB (currently always return 0)
5741 **/
5742 static int
5743 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5744 struct lpfc_nodelist *ndlp)
5745 {
5746 struct lpfc_dmabuf *pcmd;
5747 uint32_t *lp;
5748 IOCB_t *icmd;
5749 uint32_t cmd, did;
5750
5751 icmd = &cmdiocb->iocb;
5752 did = icmd->un.elsreq64.remoteID;
5753 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5754 lp = (uint32_t *) pcmd->virt;
5755
5756 cmd = *lp++;
5757 /* FARP-RSP received from DID <did> */
5758 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5759 "0600 FARP-RSP received from DID x%x\n", did);
5760 /* ACCEPT the Farp resp request */
5761 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5762
5763 return 0;
5764 }
5765
5766 /**
5767 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
5768 * @vport: pointer to a host virtual N_Port data structure.
5769 * @cmdiocb: pointer to lpfc command iocb data structure.
5770 * @fan_ndlp: pointer to a node-list data structure.
5771 *
5772 * This routine processes a Fabric Address Notification (FAN) IOCB
5773 * command received as an ELS unsolicited event. The FAN ELS command will
5774 * only be processed on a physical port (i.e., the @vport represents the
5775 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5776 * compared against those in the phba data structure. If any of those is
5777 * different, the lpfc_initial_flogi() routine is invoked to initiate
5778 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
5779 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5780 * is invoked to register login to the fabric.
5781 *
5782 * Return code
5783 * 0 - Successfully processed fan iocb (currently always return 0).
5784 **/
5785 static int
5786 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5787 struct lpfc_nodelist *fan_ndlp)
5788 {
5789 struct lpfc_hba *phba = vport->phba;
5790 uint32_t *lp;
5791 FAN *fp;
5792
5793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5794 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5795 fp = (FAN *) ++lp;
5796 /* FAN received; Fan does not have a reply sequence */
5797 if ((vport == phba->pport) &&
5798 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5799 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
5800 sizeof(struct lpfc_name))) ||
5801 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
5802 sizeof(struct lpfc_name)))) {
5803 /* This port has switched fabrics. FLOGI is required */
5804 lpfc_issue_init_vfi(vport);
5805 } else {
5806 /* FAN verified - skip FLOGI */
5807 vport->fc_myDID = vport->fc_prevDID;
5808 if (phba->sli_rev < LPFC_SLI_REV4)
5809 lpfc_issue_fabric_reglogin(vport);
5810 else
5811 lpfc_issue_reg_vfi(vport);
5812 }
5813 }
5814 return 0;
5815 }
5816
5817 /**
5818 * lpfc_els_timeout - Handler function for the els timer
5819 * @ptr: holder for the timer function associated data.
5820 *
5821 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5822 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
5823 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5824 * up the worker thread. It is for the worker thread to invoke the routine
5825 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5826 **/
5827 void
5828 lpfc_els_timeout(unsigned long ptr)
5829 {
5830 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5831 struct lpfc_hba *phba = vport->phba;
5832 uint32_t tmo_posted;
5833 unsigned long iflag;
5834
5835 spin_lock_irqsave(&vport->work_port_lock, iflag);
5836 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5837 if (!tmo_posted)
5838 vport->work_port_events |= WORKER_ELS_TMO;
5839 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
5840
5841 if (!tmo_posted)
5842 lpfc_worker_wake_up(phba);
5843 return;
5844 }
5845
5846
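/*
 * Note (illustrative, not part of the driver): lpfc_els_timeout() runs in
 * timer context, so it only records the WORKER_ELS_TMO event and wakes the
 * worker thread; the actual walk of the ELS ring is done later in process
 * context by lpfc_els_timeout_handler() below, which also re-arms the
 * timer when work remains, e.g.:
 *
 *	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
 *
 * where timeout is twice the R_A_TOV (phba->fc_ratov << 1).
 */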
5847 /**
5848 * lpfc_els_timeout_handler - Process an els timeout event
5849 * @vport: pointer to a virtual N_Port data structure.
5850 *
5851 * This routine is the actual handler function that processes an ELS timeout
5852 * event. It walks the ELS ring and aborts all the timed-out IOCBs associated
5853 * with the @vport (except ABORT/CLOSE/FARP/FARPR/FDISC) by invoking the
5854 * lpfc_sli_issue_abort_iotag() routine.
5855 **/
5856 void
5857 lpfc_els_timeout_handler(struct lpfc_vport *vport)
5858 {
5859 struct lpfc_hba *phba = vport->phba;
5860 struct lpfc_sli_ring *pring;
5861 struct lpfc_iocbq *tmp_iocb, *piocb;
5862 IOCB_t *cmd = NULL;
5863 struct lpfc_dmabuf *pcmd;
5864 uint32_t els_command = 0;
5865 uint32_t timeout;
5866 uint32_t remote_ID = 0xffffffff;
5867 LIST_HEAD(txcmplq_completions);
5868 LIST_HEAD(abort_list);
5869
5870
5871 timeout = (uint32_t)(phba->fc_ratov << 1);
5872
5873 pring = &phba->sli.ring[LPFC_ELS_RING];
5874
5875 spin_lock_irq(&phba->hbalock);
5876 list_splice_init(&pring->txcmplq, &txcmplq_completions);
5877 spin_unlock_irq(&phba->hbalock);
5878
5879 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
5880 cmd = &piocb->iocb;
5881
5882 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5883 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5884 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5885 continue;
5886
5887 if (piocb->vport != vport)
5888 continue;
5889
5890 pcmd = (struct lpfc_dmabuf *) piocb->context2;
5891 if (pcmd)
5892 els_command = *(uint32_t *) (pcmd->virt);
5893
5894 if (els_command == ELS_CMD_FARP ||
5895 els_command == ELS_CMD_FARPR ||
5896 els_command == ELS_CMD_FDISC)
5897 continue;
5898
5899 if (piocb->drvrTimeout > 0) {
5900 if (piocb->drvrTimeout >= timeout)
5901 piocb->drvrTimeout -= timeout;
5902 else
5903 piocb->drvrTimeout = 0;
5904 continue;
5905 }
5906
5907 remote_ID = 0xffffffff;
5908 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
5909 remote_ID = cmd->un.elsreq64.remoteID;
5910 else {
5911 struct lpfc_nodelist *ndlp;
5912 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
5913 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5914 remote_ID = ndlp->nlp_DID;
5915 }
5916 list_add_tail(&piocb->dlist, &abort_list);
5917 }
5918 spin_lock_irq(&phba->hbalock);
5919 list_splice(&txcmplq_completions, &pring->txcmplq);
5920 spin_unlock_irq(&phba->hbalock);
5921
5922 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
5923 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5924 "0127 ELS timeout Data: x%x x%x x%x "
5925 "x%x\n", els_command,
5926 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5927 spin_lock_irq(&phba->hbalock);
5928 list_del_init(&piocb->dlist);
5929 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5930 spin_unlock_irq(&phba->hbalock);
5931 }
5932
5933 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5934 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
5935 }
5936
5937 /**
5938 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
5939 * @vport: pointer to a host virtual N_Port data structure.
5940 *
5941 * This routine is used to clean up all the outstanding ELS commands on a
5942 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5943 * routine. After that, it walks the ELS transmit queue to remove all the
5944 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5945 * the IOCBs with a non-NULL completion callback function, the callback
5946 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5947 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5948 * callback function, the IOCB will simply be released. Finally, it walks
5949 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5950 * completion queue IOCB that is associated with the @vport and is not
5951 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5952 * part of the discovery state machine) out to HBA by invoking the
5953 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
5954 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
5955 * the IOCBs are aborted when this function returns.
5956 **/
5957 void
5958 lpfc_els_flush_cmd(struct lpfc_vport *vport)
5959 {
5960 LIST_HEAD(completions);
5961 struct lpfc_hba *phba = vport->phba;
5962 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5963 struct lpfc_iocbq *tmp_iocb, *piocb;
5964 IOCB_t *cmd = NULL;
5965
5966 lpfc_fabric_abort_vport(vport);
5967
5968 spin_lock_irq(&phba->hbalock);
5969 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5970 cmd = &piocb->iocb;
5971
5972 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5973 continue;
5974 }
5975
5976 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5977 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5978 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5979 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5980 cmd->ulpCommand == CMD_ABORT_XRI_CN)
5981 continue;
5982
5983 if (piocb->vport != vport)
5984 continue;
5985
5986 list_move_tail(&piocb->list, &completions);
5987 pring->txq_cnt--;
5988 }
5989
5990 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5991 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5992 continue;
5993 }
5994
5995 if (piocb->vport != vport)
5996 continue;
5997
5998 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5999 }
6000 spin_unlock_irq(&phba->hbalock);
6001
6002 /* Cancel all the IOCBs from the completions list */
6003 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6004 IOERR_SLI_ABORTED);
6005
6006 return;
6007 }
6008
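/*
 * Illustrative note (not part of the driver) on the flush pattern above:
 * txq entries are moved onto a local "completions" list while hbalock is
 * held and are then failed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED by
 * lpfc_sli_cancel_iocbs() after the lock is dropped, presumably so the
 * completion handlers never run with hbalock held.  txcmplq entries are
 * only aborted via lpfc_sli_issue_abort_iotag(); their completions arrive
 * later from the HBA, so they are not guaranteed to be done on return.
 */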
6009 /**
6010 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
6011 * @phba: pointer to lpfc hba data structure.
6012 *
6013 * This routine is used to clean up all the outstanding ELS commands on a
6014 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6015 * routine. After that, it walks the ELS transmit queue to remove all the
6016 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6017 * the IOCBs with the completion callback function associated, the callback
6018 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6019 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
6020 * callback function associated, the IOCB will simply be released. Finally,
6021 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6022 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6023 * management plane IOCBs that are not part of the discovery state machine)
6024 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
6025 **/
6026 void
6027 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6028 {
6029 LIST_HEAD(completions);
6030 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6031 struct lpfc_iocbq *tmp_iocb, *piocb;
6032 IOCB_t *cmd = NULL;
6033
6034 lpfc_fabric_abort_hba(phba);
6035 spin_lock_irq(&phba->hbalock);
6036 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6037 cmd = &piocb->iocb;
6038 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6039 continue;
6040 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6041 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6042 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6043 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6044 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6045 continue;
6046 list_move_tail(&piocb->list, &completions);
6047 pring->txq_cnt--;
6048 }
6049 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6050 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6051 continue;
6052 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6053 }
6054 spin_unlock_irq(&phba->hbalock);
6055
6056 /* Cancel all the IOCBs from the completions list */
6057 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6058 IOERR_SLI_ABORTED);
6059
6060 return;
6061 }
6062
6063 /**
6064 * lpfc_send_els_failure_event - Posts an ELS command failure event
6065 * @phba: Pointer to hba context object.
6066 * @cmdiocbp: Pointer to command iocb which reported error.
6067 * @rspiocbp: Pointer to response iocb which reported error.
6068 *
6069 * This function sends an event when there is an ELS command
6070 * failure.
6071 **/
6072 void
6073 lpfc_send_els_failure_event(struct lpfc_hba *phba,
6074 struct lpfc_iocbq *cmdiocbp,
6075 struct lpfc_iocbq *rspiocbp)
6076 {
6077 struct lpfc_vport *vport = cmdiocbp->vport;
6078 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6079 struct lpfc_lsrjt_event lsrjt_event;
6080 struct lpfc_fabric_event_header fabric_event;
6081 struct ls_rjt stat;
6082 struct lpfc_nodelist *ndlp;
6083 uint32_t *pcmd;
6084
6085 ndlp = cmdiocbp->context1;
6086 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6087 return;
6088
6089 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
6090 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
6091 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
6092 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
6093 sizeof(struct lpfc_name));
6094 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
6095 sizeof(struct lpfc_name));
6096 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
6097 cmdiocbp->context2)->virt);
6098 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
6099 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
6100 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
6101 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
6102 fc_host_post_vendor_event(shost,
6103 fc_get_event_number(),
6104 sizeof(lsrjt_event),
6105 (char *)&lsrjt_event,
6106 LPFC_NL_VENDOR_ID);
6107 return;
6108 }
6109 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
6110 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
6111 fabric_event.event_type = FC_REG_FABRIC_EVENT;
6112 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
6113 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
6114 else
6115 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
6116 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
6117 sizeof(struct lpfc_name));
6118 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
6119 sizeof(struct lpfc_name));
6120 fc_host_post_vendor_event(shost,
6121 fc_get_event_number(),
6122 sizeof(fabric_event),
6123 (char *)&fabric_event,
6124 LPFC_NL_VENDOR_ID);
6125 return;
6126 }
6127
6128 }
6129
6130 /**
6131 * lpfc_send_els_event - Posts unsolicited els event
6132 * @vport: Pointer to vport object.
6133 * @ndlp: Pointer FC node object.
6134 * @cmd: ELS command code.
6135 *
6136 * This function posts an event when there is an incoming
6137 * unsolicited ELS command.
6138 **/
6139 static void
6140 lpfc_send_els_event(struct lpfc_vport *vport,
6141 struct lpfc_nodelist *ndlp,
6142 uint32_t *payload)
6143 {
6144 struct lpfc_els_event_header *els_data = NULL;
6145 struct lpfc_logo_event *logo_data = NULL;
6146 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6147
6148 if (*payload == ELS_CMD_LOGO) {
6149 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
6150 if (!logo_data) {
6151 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6152 "0148 Failed to allocate memory "
6153 "for LOGO event\n");
6154 return;
6155 }
6156 els_data = &logo_data->header;
6157 } else {
6158 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
6159 GFP_KERNEL);
6160 if (!els_data) {
6161 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6162 "0149 Failed to allocate memory "
6163 "for ELS event\n");
6164 return;
6165 }
6166 }
6167 els_data->event_type = FC_REG_ELS_EVENT;
6168 switch (*payload) {
6169 case ELS_CMD_PLOGI:
6170 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
6171 break;
6172 case ELS_CMD_PRLO:
6173 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
6174 break;
6175 case ELS_CMD_ADISC:
6176 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
6177 break;
6178 case ELS_CMD_LOGO:
6179 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
6180 /* Copy the WWPN in the LOGO payload */
6181 memcpy(logo_data->logo_wwpn, &payload[2],
6182 sizeof(struct lpfc_name));
6183 break;
6184 default:
6185 kfree(els_data);
6186 return;
6187 }
6188 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
6189 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
6190 if (*payload == ELS_CMD_LOGO) {
6191 fc_host_post_vendor_event(shost,
6192 fc_get_event_number(),
6193 sizeof(struct lpfc_logo_event),
6194 (char *)logo_data,
6195 LPFC_NL_VENDOR_ID);
6196 kfree(logo_data);
6197 } else {
6198 fc_host_post_vendor_event(shost,
6199 fc_get_event_number(),
6200 sizeof(struct lpfc_els_event_header),
6201 (char *)els_data,
6202 LPFC_NL_VENDOR_ID);
6203 kfree(els_data);
6204 }
6205
6206 return;
6207 }
6208
6209
6210 /**
6211 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
6212 * @phba: pointer to lpfc hba data structure.
6213 * @pring: pointer to a SLI ring.
6214 * @vport: pointer to a host virtual N_Port data structure.
6215 * @elsiocb: pointer to lpfc els command iocb data structure.
6216 *
6217 * This routine is used for processing the IOCB associated with an unsolicited
6218 * event. It first determines whether there is an existing ndlp that matches
6219 * the DID from the unsolicited IOCB. If not, it will create a new one with
6220 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
6221 * IOCB is then used to invoke the proper routine and to set up proper state
6222 * of the discovery state machine.
6223 **/
6224 static void
6225 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6226 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
6227 {
6228 struct Scsi_Host *shost;
6229 struct lpfc_nodelist *ndlp;
6230 struct ls_rjt stat;
6231 uint32_t *payload;
6232 uint32_t cmd, did, newnode, rjt_err = 0;
6233 IOCB_t *icmd = &elsiocb->iocb;
6234
6235 if (!vport || !(elsiocb->context2))
6236 goto dropit;
6237
6238 newnode = 0;
6239 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
6240 cmd = *payload;
6241 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
6242 lpfc_post_buffer(phba, pring, 1);
6243
6244 did = icmd->un.rcvels.remoteID;
6245 if (icmd->ulpStatus) {
6246 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6247 "RCV Unsol ELS: status:x%x/x%x did:x%x",
6248 icmd->ulpStatus, icmd->un.ulpWord[4], did);
6249 goto dropit;
6250 }
6251
6252 /* Check to see if link went down during discovery */
6253 if (lpfc_els_chk_latt(vport))
6254 goto dropit;
6255
6256 /* Ignore traffic received during vport shutdown. */
6257 if (vport->load_flag & FC_UNLOADING)
6258 goto dropit;
6259
6260 /* If NPort discovery is delayed drop incoming ELS */
6261 if ((vport->fc_flag & FC_DISC_DELAYED) &&
6262 (cmd != ELS_CMD_PLOGI))
6263 goto dropit;
6264
6265 ndlp = lpfc_findnode_did(vport, did);
6266 if (!ndlp) {
6267 /* Cannot find existing Fabric ndlp, so allocate a new one */
6268 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6269 if (!ndlp)
6270 goto dropit;
6271
6272 lpfc_nlp_init(vport, ndlp, did);
6273 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6274 newnode = 1;
6275 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6276 ndlp->nlp_type |= NLP_FABRIC;
6277 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6278 ndlp = lpfc_enable_node(vport, ndlp,
6279 NLP_STE_UNUSED_NODE);
6280 if (!ndlp)
6281 goto dropit;
6282 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6283 newnode = 1;
6284 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6285 ndlp->nlp_type |= NLP_FABRIC;
6286 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
6287 /* This is similar to the new node path */
6288 ndlp = lpfc_nlp_get(ndlp);
6289 if (!ndlp)
6290 goto dropit;
6291 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6292 newnode = 1;
6293 }
6294
6295 phba->fc_stat.elsRcvFrame++;
6296
6297 elsiocb->context1 = lpfc_nlp_get(ndlp);
6298 elsiocb->vport = vport;
6299
6300 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
6301 cmd &= ELS_CMD_MASK;
6302 }
6303 /* ELS command <elsCmd> received from NPORT <did> */
6304 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6305 "0112 ELS command x%x received from NPORT x%x "
6306 "Data: x%x\n", cmd, did, vport->port_state);
6307 switch (cmd) {
6308 case ELS_CMD_PLOGI:
6309 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6310 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
6311 did, vport->port_state, ndlp->nlp_flag);
6312
6313 phba->fc_stat.elsRcvPLOGI++;
6314 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6315
6316 lpfc_send_els_event(vport, ndlp, payload);
6317
6318 /* If Nport discovery is delayed, reject PLOGIs */
6319 if (vport->fc_flag & FC_DISC_DELAYED) {
6320 rjt_err = LSRJT_UNABLE_TPC;
6321 break;
6322 }
6323 if (vport->port_state < LPFC_DISC_AUTH) {
6324 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6325 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6326 rjt_err = LSRJT_UNABLE_TPC;
6327 break;
6328 }
6329 /* We get here, and drop thru, if we are PT2PT with
6330 * another NPort and the other side has initiated
6331 * the PLOGI before responding to our FLOGI.
6332 */
6333 }
6334
6335 shost = lpfc_shost_from_vport(vport);
6336 spin_lock_irq(shost->host_lock);
6337 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6338 spin_unlock_irq(shost->host_lock);
6339
6340 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6341 NLP_EVT_RCV_PLOGI);
6342
6343 break;
6344 case ELS_CMD_FLOGI:
6345 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6346 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
6347 did, vport->port_state, ndlp->nlp_flag);
6348
6349 phba->fc_stat.elsRcvFLOGI++;
6350 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
6351 if (newnode)
6352 lpfc_nlp_put(ndlp);
6353 break;
6354 case ELS_CMD_LOGO:
6355 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6356 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
6357 did, vport->port_state, ndlp->nlp_flag);
6358
6359 phba->fc_stat.elsRcvLOGO++;
6360 lpfc_send_els_event(vport, ndlp, payload);
6361 if (vport->port_state < LPFC_DISC_AUTH) {
6362 rjt_err = LSRJT_UNABLE_TPC;
6363 break;
6364 }
6365 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
6366 break;
6367 case ELS_CMD_PRLO:
6368 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6369 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
6370 did, vport->port_state, ndlp->nlp_flag);
6371
6372 phba->fc_stat.elsRcvPRLO++;
6373 lpfc_send_els_event(vport, ndlp, payload);
6374 if (vport->port_state < LPFC_DISC_AUTH) {
6375 rjt_err = LSRJT_UNABLE_TPC;
6376 break;
6377 }
6378 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
6379 break;
6380 case ELS_CMD_RSCN:
6381 phba->fc_stat.elsRcvRSCN++;
6382 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
6383 if (newnode)
6384 lpfc_nlp_put(ndlp);
6385 break;
6386 case ELS_CMD_ADISC:
6387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6388 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
6389 did, vport->port_state, ndlp->nlp_flag);
6390
6391 lpfc_send_els_event(vport, ndlp, payload);
6392 phba->fc_stat.elsRcvADISC++;
6393 if (vport->port_state < LPFC_DISC_AUTH) {
6394 rjt_err = LSRJT_UNABLE_TPC;
6395 break;
6396 }
6397 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6398 NLP_EVT_RCV_ADISC);
6399 break;
6400 case ELS_CMD_PDISC:
6401 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6402 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
6403 did, vport->port_state, ndlp->nlp_flag);
6404
6405 phba->fc_stat.elsRcvPDISC++;
6406 if (vport->port_state < LPFC_DISC_AUTH) {
6407 rjt_err = LSRJT_UNABLE_TPC;
6408 break;
6409 }
6410 lpfc_disc_state_machine(vport, ndlp, elsiocb,
6411 NLP_EVT_RCV_PDISC);
6412 break;
6413 case ELS_CMD_FARPR:
6414 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6415 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
6416 did, vport->port_state, ndlp->nlp_flag);
6417
6418 phba->fc_stat.elsRcvFARPR++;
6419 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
6420 break;
6421 case ELS_CMD_FARP:
6422 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6423 "RCV FARP: did:x%x/ste:x%x flg:x%x",
6424 did, vport->port_state, ndlp->nlp_flag);
6425
6426 phba->fc_stat.elsRcvFARP++;
6427 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
6428 break;
6429 case ELS_CMD_FAN:
6430 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6431 "RCV FAN: did:x%x/ste:x%x flg:x%x",
6432 did, vport->port_state, ndlp->nlp_flag);
6433
6434 phba->fc_stat.elsRcvFAN++;
6435 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
6436 break;
6437 case ELS_CMD_PRLI:
6438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6439 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
6440 did, vport->port_state, ndlp->nlp_flag);
6441
6442 phba->fc_stat.elsRcvPRLI++;
6443 if (vport->port_state < LPFC_DISC_AUTH) {
6444 rjt_err = LSRJT_UNABLE_TPC;
6445 break;
6446 }
6447 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
6448 break;
6449 case ELS_CMD_LIRR:
6450 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6451 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
6452 did, vport->port_state, ndlp->nlp_flag);
6453
6454 phba->fc_stat.elsRcvLIRR++;
6455 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
6456 if (newnode)
6457 lpfc_nlp_put(ndlp);
6458 break;
6459 case ELS_CMD_RLS:
6460 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6461 "RCV RLS: did:x%x/ste:x%x flg:x%x",
6462 did, vport->port_state, ndlp->nlp_flag);
6463
6464 phba->fc_stat.elsRcvRLS++;
6465 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6466 if (newnode)
6467 lpfc_nlp_put(ndlp);
6468 break;
6469 case ELS_CMD_RPS:
6470 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6471 "RCV RPS: did:x%x/ste:x%x flg:x%x",
6472 did, vport->port_state, ndlp->nlp_flag);
6473
6474 phba->fc_stat.elsRcvRPS++;
6475 lpfc_els_rcv_rps(vport, elsiocb, ndlp);
6476 if (newnode)
6477 lpfc_nlp_put(ndlp);
6478 break;
6479 case ELS_CMD_RPL:
6480 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6481 "RCV RPL: did:x%x/ste:x%x flg:x%x",
6482 did, vport->port_state, ndlp->nlp_flag);
6483
6484 phba->fc_stat.elsRcvRPL++;
6485 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
6486 if (newnode)
6487 lpfc_nlp_put(ndlp);
6488 break;
6489 case ELS_CMD_RNID:
6490 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6491 "RCV RNID: did:x%x/ste:x%x flg:x%x",
6492 did, vport->port_state, ndlp->nlp_flag);
6493
6494 phba->fc_stat.elsRcvRNID++;
6495 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
6496 if (newnode)
6497 lpfc_nlp_put(ndlp);
6498 break;
6499 case ELS_CMD_RTV:
6500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6501 "RCV RTV: did:x%x/ste:x%x flg:x%x",
6502 did, vport->port_state, ndlp->nlp_flag);
6503 phba->fc_stat.elsRcvRTV++;
6504 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6505 if (newnode)
6506 lpfc_nlp_put(ndlp);
6507 break;
6508 case ELS_CMD_RRQ:
6509 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6510 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
6511 did, vport->port_state, ndlp->nlp_flag);
6512
6513 phba->fc_stat.elsRcvRRQ++;
6514 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
6515 if (newnode)
6516 lpfc_nlp_put(ndlp);
6517 break;
6518 case ELS_CMD_ECHO:
6519 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6520 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6521 did, vport->port_state, ndlp->nlp_flag);
6522
6523 phba->fc_stat.elsRcvECHO++;
6524 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6525 if (newnode)
6526 lpfc_nlp_put(ndlp);
6527 break;
6528 default:
6529 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6530 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
6531 cmd, did, vport->port_state);
6532
6533 /* Unsupported ELS command, reject */
6534 rjt_err = LSRJT_CMD_UNSUPPORTED;
6535
6536 /* Unknown ELS command <elsCmd> received from NPORT <did> */
6537 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6538 "0115 Unknown ELS command x%x "
6539 "received from NPORT x%x\n", cmd, did);
6540 if (newnode)
6541 lpfc_nlp_put(ndlp);
6542 break;
6543 }
6544
6545 /* Check if we need to LS_RJT the received ELS cmd */
6546 if (rjt_err) {
6547 memset(&stat, 0, sizeof(stat));
6548 stat.un.b.lsRjtRsnCode = rjt_err;
6549 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
6550 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
6551 NULL);
6552 }
6553
6554 lpfc_nlp_put(elsiocb->context1);
6555 elsiocb->context1 = NULL;
6556 return;
6557
6558 dropit:
6559 if (vport && !(vport->load_flag & FC_UNLOADING))
6560 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6561 "0111 Dropping received ELS cmd "
6562 "Data: x%x x%x x%x\n",
6563 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
6564 phba->fc_stat.elsRcvDrop++;
6565 }
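
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * the reject path above fills an LS_RJT reason code and explanation and
 * hands the combined word to lpfc_els_rsp_reject().  The guarded-out,
 * standalone C below shows how such a reject word is commonly laid out per
 * FC-LS (reason code in bits 23:16, explanation in bits 15:8 of the
 * big-endian payload word).  All EX_* names and the example values are
 * invented for the sketch.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdint.h>

#define EX_RJT_UNABLE_TPC	0x09	/* example reason: unable to perform */
#define EX_EXP_NOTHING_MORE	0x00	/* example explanation */

static uint32_t ex_pack_ls_rjt(uint8_t rsn, uint8_t exp)
{
	/* bits 31:24 reserved, 23:16 reason, 15:8 explanation, 7:0 vendor */
	return ((uint32_t)rsn << 16) | ((uint32_t)exp << 8);
}

static void ex_unpack_ls_rjt(uint32_t word, uint8_t *rsn, uint8_t *exp)
{
	*rsn = (word >> 16) & 0xff;
	*exp = (word >> 8) & 0xff;
}

int main(void)
{
	uint32_t w = ex_pack_ls_rjt(EX_RJT_UNABLE_TPC, EX_EXP_NOTHING_MORE);
	uint8_t rsn, exp;

	ex_unpack_ls_rjt(w, &rsn, &exp);
	printf("reject word 0x%08x -> reason 0x%02x, explanation 0x%02x\n",
	       w, rsn, exp);
	return 0;
}
#endif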
6566
6567 /**
6568 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
6569 * @phba: pointer to lpfc hba data structure.
6570 * @vpi: host virtual N_Port identifier.
6571 *
6572 * This routine finds a vport on an HBA (referred to by @phba) through a
6573 * @vpi. The function walks the HBA's vport list and returns the address
6574 * of the vport with the matching @vpi.
6575 *
6576 * Return code
6577 * NULL - No vport with the matching @vpi found
6578 * Otherwise - Address to the vport with the matching @vpi.
6579 **/
6580 struct lpfc_vport *
6581 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6582 {
6583 struct lpfc_vport *vport;
6584 unsigned long flags;
6585
6586 spin_lock_irqsave(&phba->hbalock, flags);
6587 list_for_each_entry(vport, &phba->port_list, listentry) {
6588 if (vport->vpi == vpi) {
6589 spin_unlock_irqrestore(&phba->hbalock, flags);
6590 return vport;
6591 }
6592 }
6593 spin_unlock_irqrestore(&phba->hbalock, flags);
6594 return NULL;
6595 }
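
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * lpfc_find_vport_by_vpid() above walks the HBA's port list under hbalock
 * and returns the first entry whose vpi matches.  The guarded-out sketch
 * below shows the same "search a shared list under a lock" idiom in
 * standalone C, with a pthread mutex standing in for the spinlock; the
 * ex_* types and names are invented for the example.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

struct ex_port {
	uint16_t vpi;
	struct ex_port *next;
};

static struct ex_port ex_p2 = { .vpi = 2, .next = NULL };
static struct ex_port ex_p1 = { .vpi = 1, .next = &ex_p2 };
static struct ex_port *ex_port_list = &ex_p1;
static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk the shared list under the lock; drop the lock before returning. */
static struct ex_port *ex_find_port_by_vpi(uint16_t vpi)
{
	struct ex_port *p, *found = NULL;

	pthread_mutex_lock(&ex_lock);
	for (p = ex_port_list; p; p = p->next) {
		if (p->vpi == vpi) {
			found = p;
			break;
		}
	}
	pthread_mutex_unlock(&ex_lock);
	return found;
}

int main(void)
{
	printf("vpi 2: %s\n", ex_find_port_by_vpi(2) ? "found" : "missing");
	printf("vpi 7: %s\n", ex_find_port_by_vpi(7) ? "found" : "missing");
	return 0;
}
#endif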
6596
6597 /**
6598 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
6599 * @phba: pointer to lpfc hba data structure.
6600 * @pring: pointer to a SLI ring.
6601 * @elsiocb: pointer to lpfc els iocb data structure.
6602 *
6603 * This routine is used to process an unsolicited event received from a SLI
6604 * (Service Level Interface) ring. The actual processing of the data buffer
6605 * associated with the unsolicited event is done by invoking the routine
6606 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
6607 * SLI ring on which the unsolicited event was received.
6608 **/
6609 void
6610 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6611 struct lpfc_iocbq *elsiocb)
6612 {
6613 struct lpfc_vport *vport = phba->pport;
6614 IOCB_t *icmd = &elsiocb->iocb;
6615 dma_addr_t paddr;
6616 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
6617 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
6618
6619 elsiocb->context1 = NULL;
6620 elsiocb->context2 = NULL;
6621 elsiocb->context3 = NULL;
6622
6623 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
6624 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
6625 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
6626 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
6627 phba->fc_stat.NoRcvBuf++;
6628 /* Not enough posted buffers; Try posting more buffers */
6629 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
6630 lpfc_post_buffer(phba, pring, 0);
6631 return;
6632 }
6633
6634 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6635 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
6636 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6637 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
6638 vport = phba->pport;
6639 else
6640 vport = lpfc_find_vport_by_vpid(phba,
6641 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
6642 }
6643 /* If there are no BDEs associated
6644 * with this IOCB, there is nothing to do.
6645 */
6646 if (icmd->ulpBdeCount == 0)
6647 return;
6648
6649 /* type of ELS cmd is first 32bit word
6650 * in packet
6651 */
6652 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6653 elsiocb->context2 = bdeBuf1;
6654 } else {
6655 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
6656 icmd->un.cont64[0].addrLow);
6657 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
6658 paddr);
6659 }
6660
6661 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6662 /*
6663 * The different unsolicited event handlers would tell us
6664 * if they are done with "mp" by setting context2 to NULL.
6665 */
6666 if (elsiocb->context2) {
6667 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6668 elsiocb->context2 = NULL;
6669 }
6670
6671 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
6672 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
6673 icmd->ulpBdeCount == 2) {
6674 elsiocb->context2 = bdeBuf2;
6675 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6676 /* free mp if we are done with it */
6677 if (elsiocb->context2) {
6678 lpfc_in_buf_free(phba, elsiocb->context2);
6679 elsiocb->context2 = NULL;
6680 }
6681 }
6682 }
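
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * in the non-HBQ path above, the buffer's DMA address arrives split across
 * the addrHigh/addrLow words of the IOCB and is recombined via getPaddr()
 * before the posted buffer is looked up.  The guarded-out sketch shows the
 * usual way two 32-bit halves are recombined into a 64-bit address; the
 * ex_get_paddr() name is invented for the example.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint64_t ex_get_paddr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("0x%" PRIx64 "\n", ex_get_paddr(0x00000001, 0x80000000));
	return 0;
}
#endif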
6683
6684 /**
6685 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
6686 * @phba: pointer to lpfc hba data structure.
6687 * @vport: pointer to a virtual N_Port data structure.
6688 *
6689 * This routine issues a Port Login (PLOGI) to the Name Server with
6690 * State Change Request (SCR) for a @vport. This routine will create an
6691 * ndlp for the Name Server associated to the @vport if such node does
6692 * not already exist. The PLOGI to Name Server is issued by invoking the
6693 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
6694 * (FDMI) is configured to the @vport, a FDMI node will be created and
6695 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
6696 **/
6697 void
6698 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6699 {
6700 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
6701 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6702
6703 /*
6704 * If lpfc_delay_discovery parameter is set and the clean address
6705 * bit is cleared and fc fabric parameters chenged, delay FC NPort
6706 * discovery.
6707 */
6708 spin_lock_irq(shost->host_lock);
6709 if (vport->fc_flag & FC_DISC_DELAYED) {
6710 spin_unlock_irq(shost->host_lock);
6711 mod_timer(&vport->delayed_disc_tmo,
6712 jiffies + HZ * phba->fc_ratov);
6713 return;
6714 }
6715 spin_unlock_irq(shost->host_lock);
6716
6717 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6718 if (!ndlp) {
6719 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6720 if (!ndlp) {
6721 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6722 lpfc_disc_start(vport);
6723 return;
6724 }
6725 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6726 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6727 "0251 NameServer login: no memory\n");
6728 return;
6729 }
6730 lpfc_nlp_init(vport, ndlp, NameServer_DID);
6731 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6732 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6733 if (!ndlp) {
6734 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6735 lpfc_disc_start(vport);
6736 return;
6737 }
6738 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6739 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6740 "0348 NameServer login: node freed\n");
6741 return;
6742 }
6743 }
6744 ndlp->nlp_type |= NLP_FABRIC;
6745
6746 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6747
6748 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
6749 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6750 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6751 "0252 Cannot issue NameServer login\n");
6752 return;
6753 }
6754
6755 if (vport->cfg_fdmi_on) {
6756 /* If this is the first time, allocate an ndlp and initialize
6757 * it. Otherwise, make sure the node is enabled and then do the
6758 * login.
6759 */
6760 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
6761 if (!ndlp_fdmi) {
6762 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
6763 GFP_KERNEL);
6764 if (ndlp_fdmi) {
6765 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6766 ndlp_fdmi->nlp_type |= NLP_FABRIC;
6767 } else
6768 return;
6769 }
6770 if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
6771 ndlp_fdmi = lpfc_enable_node(vport,
6772 ndlp_fdmi,
6773 NLP_STE_NPR_NODE);
6774
6775 if (ndlp_fdmi) {
6776 lpfc_nlp_set_state(vport, ndlp_fdmi,
6777 NLP_STE_PLOGI_ISSUE);
6778 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
6779 }
6780 }
6781 }
6782
6783 /**
6784 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
6785 * @phba: pointer to lpfc hba data structure.
6786 * @pmb: pointer to the driver internal queue element for mailbox command.
6787 *
6788 * This routine is the completion callback function to register new vport
6789 * mailbox command. If the new vport mailbox command completes successfully,
6790 * the fabric registration login shall be performed on the physical port (the
6791 * new vport created is actually a physical port, with VPI 0) or the port
6792 * login to Name Server for State Change Request (SCR) will be performed
6793 * on a virtual port (a real virtual port, with VPI greater than 0).
6794 **/
6795 static void
6796 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6797 {
6798 struct lpfc_vport *vport = pmb->vport;
6799 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6800 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
6801 MAILBOX_t *mb = &pmb->u.mb;
6802 int rc;
6803
6804 spin_lock_irq(shost->host_lock);
6805 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6806 spin_unlock_irq(shost->host_lock);
6807
6808 if (mb->mbxStatus) {
6809 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6810 "0915 Register VPI failed : Status: x%x"
6811 " upd bit: x%x \n", mb->mbxStatus,
6812 mb->un.varRegVpi.upd);
6813 if (phba->sli_rev == LPFC_SLI_REV4 &&
6814 mb->un.varRegVpi.upd)
6815 goto mbox_err_exit ;
6816
6817 switch (mb->mbxStatus) {
6818 case 0x11: /* unsupported feature */
6819 case 0x9603: /* max_vpi exceeded */
6820 case 0x9602: /* Link event since CLEAR_LA */
6821 /* giving up on vport registration */
6822 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6823 spin_lock_irq(shost->host_lock);
6824 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6825 spin_unlock_irq(shost->host_lock);
6826 lpfc_can_disctmo(vport);
6827 break;
6828 /* If reg_vpi fail with invalid VPI status, re-init VPI */
6829 case 0x20:
6830 spin_lock_irq(shost->host_lock);
6831 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6832 spin_unlock_irq(shost->host_lock);
6833 lpfc_init_vpi(phba, pmb, vport->vpi);
6834 pmb->vport = vport;
6835 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6836 rc = lpfc_sli_issue_mbox(phba, pmb,
6837 MBX_NOWAIT);
6838 if (rc == MBX_NOT_FINISHED) {
6839 lpfc_printf_vlog(vport,
6840 KERN_ERR, LOG_MBOX,
6841 "2732 Failed to issue INIT_VPI"
6842 " mailbox command\n");
6843 } else {
6844 lpfc_nlp_put(ndlp);
6845 return;
6846 }
6847
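/* Fall through to the default recovery path */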
6848 default:
6849 /* Try to recover from this error */
6850 if (phba->sli_rev == LPFC_SLI_REV4)
6851 lpfc_sli4_unreg_all_rpis(vport);
6852 lpfc_mbx_unreg_vpi(vport);
6853 spin_lock_irq(shost->host_lock);
6854 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6855 spin_unlock_irq(shost->host_lock);
6856 if (vport->port_type == LPFC_PHYSICAL_PORT
6857 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
6858 lpfc_issue_init_vfi(vport);
6859 else
6860 lpfc_initial_fdisc(vport);
6861 break;
6862 }
6863 } else {
6864 spin_lock_irq(shost->host_lock);
6865 vport->vpi_state |= LPFC_VPI_REGISTERED;
6866 spin_unlock_irq(shost->host_lock);
6867 if (vport == phba->pport) {
6868 if (phba->sli_rev < LPFC_SLI_REV4)
6869 lpfc_issue_fabric_reglogin(vport);
6870 else {
6871 /*
6872 * If the physical port is instantiated using
6873 * FDISC, do not start vport discovery.
6874 */
6875 if (vport->port_state != LPFC_FDISC)
6876 lpfc_start_fdiscs(phba);
6877 lpfc_do_scr_ns_plogi(phba, vport);
6878 }
6879 } else
6880 lpfc_do_scr_ns_plogi(phba, vport);
6881 }
6882 mbox_err_exit:
6883 /* Now, we decrement the ndlp reference count held for this
6884 * callback function
6885 */
6886 lpfc_nlp_put(ndlp);
6887
6888 mempool_free(pmb, phba->mbox_mem_pool);
6889 return;
6890 }
6891
6892 /**
6893 * lpfc_register_new_vport - Register a new vport with a HBA
6894 * @phba: pointer to lpfc hba data structure.
6895 * @vport: pointer to a host virtual N_Port data structure.
6896 * @ndlp: pointer to a node-list data structure.
6897 *
6898 * This routine registers the @vport as a new virtual port with an HBA.
6899 * It is done through a register vpi mailbox command.
6900 **/
6901 void
6902 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6903 struct lpfc_nodelist *ndlp)
6904 {
6905 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6906 LPFC_MBOXQ_t *mbox;
6907
6908 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6909 if (mbox) {
6910 lpfc_reg_vpi(vport, mbox);
6911 mbox->vport = vport;
6912 mbox->context2 = lpfc_nlp_get(ndlp);
6913 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
6914 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6915 == MBX_NOT_FINISHED) {
6916 /* mailbox command could not be issued; decrement the ndlp
6917 * reference count taken for this command
6918 */
6919 lpfc_nlp_put(ndlp);
6920 mempool_free(mbox, phba->mbox_mem_pool);
6921
6922 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6923 "0253 Register VPI: Can't send mbox\n");
6924 goto mbox_err_exit;
6925 }
6926 } else {
6927 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6928 "0254 Register VPI: no memory\n");
6929 goto mbox_err_exit;
6930 }
6931 return;
6932
6933 mbox_err_exit:
6934 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6935 spin_lock_irq(shost->host_lock);
6936 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6937 spin_unlock_irq(shost->host_lock);
6938 return;
6939 }
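
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * lpfc_register_new_vport() above takes an ndlp reference for the mailbox
 * completion and drops it again if the mailbox cannot be issued.  The
 * guarded-out sketch shows that "reference follows the async consumer"
 * pattern in standalone C; the ex_* names are invented for the example.
 */
#if 0	/* never compiled */
#include <stdio.h>

struct ex_node {
	int refcnt;
};

static void ex_get(struct ex_node *n) { n->refcnt++; }
static void ex_put(struct ex_node *n) { n->refcnt--; }

/* Pretend asynchronous submission; 'ok' selects success or failure. */
static int ex_submit_async(struct ex_node *n, int ok)
{
	(void)n;
	return ok ? 0 : -1;
}

/*
 * Take a reference for the async consumer before submitting.  If the
 * submission itself fails, the consumer will never run, so the reference
 * must be dropped again right here.
 */
static void ex_start(struct ex_node *n, int ok)
{
	ex_get(n);
	if (ex_submit_async(n, ok) != 0)
		ex_put(n);
	printf("refcnt now %d\n", n->refcnt);
}

int main(void)
{
	struct ex_node n = { .refcnt = 1 };

	ex_start(&n, 1);	/* success: extra ref held for completion */
	ex_start(&n, 0);	/* failure: extra ref dropped immediately */
	return 0;
}
#endif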
6940
6941 /**
6942 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6943 * @phba: pointer to lpfc hba data structure.
6944 *
6945 * This routine cancels the retry delay timers for all the vports.
6946 **/
6947 void
6948 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6949 {
6950 struct lpfc_vport **vports;
6951 struct lpfc_nodelist *ndlp;
6952 uint32_t link_state;
6953 int i;
6954
6955 /* Treat this failure as linkdown for all vports */
6956 link_state = phba->link_state;
6957 lpfc_linkdown(phba);
6958 phba->link_state = link_state;
6959
6960 vports = lpfc_create_vport_work_array(phba);
6961
6962 if (vports) {
6963 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6964 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6965 if (ndlp)
6966 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6967 lpfc_els_flush_cmd(vports[i]);
6968 }
6969 lpfc_destroy_vport_work_array(phba, vports);
6970 }
6971 }
6972
6973 /**
6974 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6975 * @phba: pointer to lpfc hba data structure.
6976 *
6977 * This routine aborts all pending discovery commands and
6978 * starts a timer to retry FLOGI for the physical port
6979 * discovery.
6980 **/
6981 void
6982 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6983 {
6984 struct lpfc_nodelist *ndlp;
6985 struct Scsi_Host *shost;
6986
6987 /* Cancel all the vports' retry delay timers */
6988 lpfc_cancel_all_vport_retry_delay_timer(phba);
6989
6990 /* If the fabric requires FLOGI, then re-instantiate the physical login */
6991 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6992 if (!ndlp)
6993 return;
6994
6995 shost = lpfc_shost_from_vport(phba->pport);
6996 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6997 spin_lock_irq(shost->host_lock);
6998 ndlp->nlp_flag |= NLP_DELAY_TMO;
6999 spin_unlock_irq(shost->host_lock);
7000 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7001 phba->pport->port_state = LPFC_FLOGI;
7002 return;
7003 }
7004
7005 /**
7006 * lpfc_fabric_login_reqd - Check if FLOGI required.
7007 * @phba: pointer to lpfc hba data structure.
7008 * @cmdiocb: pointer to FDISC command iocb.
7009 * @rspiocb: pointer to FDISC response iocb.
7010 *
7011 * This routine checks if a FLOGI is required for FDISC
7012 * to succeed.
7013 **/
7014 static int
7015 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7016 struct lpfc_iocbq *cmdiocb,
7017 struct lpfc_iocbq *rspiocb)
7018 {
7019
7020 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7021 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7022 return 0;
7023 else
7024 return 1;
7025 }
7026
7027 /**
7028 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
7029 * @phba: pointer to lpfc hba data structure.
7030 * @cmdiocb: pointer to lpfc command iocb data structure.
7031 * @rspiocb: pointer to lpfc response iocb data structure.
7032 *
7033 * This routine is the completion callback function to a Fabric Discover
7034 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7035 * single threaded, each FDISC completion callback function will reset
7036 * the discovery timer for all vports such that the timers will not get
7037 * unnecessary timeout. The function checks the FDISC IOCB status. If error
7038 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
7039 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
7040 * assigned to the vport has been changed with the completion of the FDISC
7041 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
7042 * are unregistered from the HBA, and then the lpfc_register_new_vport()
7043 * routine is invoked to register new vport with the HBA. Otherwise, the
7044 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
7045 * Server for State Change Request (SCR).
7046 **/
7047 static void
7048 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7049 struct lpfc_iocbq *rspiocb)
7050 {
7051 struct lpfc_vport *vport = cmdiocb->vport;
7052 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7053 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7054 struct lpfc_nodelist *np;
7055 struct lpfc_nodelist *next_np;
7056 IOCB_t *irsp = &rspiocb->iocb;
7057 struct lpfc_iocbq *piocb;
7058 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7059 struct serv_parm *sp;
7060 uint8_t fabric_param_changed;
7061
7062 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7063 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7064 irsp->ulpStatus, irsp->un.ulpWord[4],
7065 vport->fc_prevDID);
7066 /* Since all FDISCs are being single threaded, we
7067 * must reset the discovery timer for ALL vports
7068 * waiting to send FDISC when one completes.
7069 */
7070 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7071 lpfc_set_disctmo(piocb->vport);
7072 }
7073
7074 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7075 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
7076 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7077
7078 if (irsp->ulpStatus) {
7079
7080 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7081 lpfc_retry_pport_discovery(phba);
7082 goto out;
7083 }
7084
7085 /* Check for retry */
7086 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7087 goto out;
7088 /* FDISC failed */
7089 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7090 "0126 FDISC failed. (%d/%d)\n",
7091 irsp->ulpStatus, irsp->un.ulpWord[4]);
7092 goto fdisc_failed;
7093 }
7094 spin_lock_irq(shost->host_lock);
7095 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
7096 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
7097 vport->fc_flag |= FC_FABRIC;
7098 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
7099 vport->fc_flag |= FC_PUBLIC_LOOP;
7100 spin_unlock_irq(shost->host_lock);
7101
7102 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7103 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
7104 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7105 sp = prsp->virt + sizeof(uint32_t);
7106 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7107 memcpy(&vport->fabric_portname, &sp->portName,
7108 sizeof(struct lpfc_name));
7109 memcpy(&vport->fabric_nodename, &sp->nodeName,
7110 sizeof(struct lpfc_name));
7111 if (fabric_param_changed &&
7112 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7113 /* If our NportID changed, we need to ensure all
7114 * remaining NPORTs get unreg_login'ed so we can
7115 * issue unreg_vpi.
7116 */
7117 list_for_each_entry_safe(np, next_np,
7118 &vport->fc_nodes, nlp_listp) {
7119 if (!NLP_CHK_NODE_ACT(np) ||
7120 (np->nlp_state != NLP_STE_NPR_NODE) ||
7121 !(np->nlp_flag & NLP_NPR_ADISC))
7122 continue;
7123 spin_lock_irq(shost->host_lock);
7124 np->nlp_flag &= ~NLP_NPR_ADISC;
7125 spin_unlock_irq(shost->host_lock);
7126 lpfc_unreg_rpi(vport, np);
7127 }
7128 lpfc_cleanup_pending_mbox(vport);
7129
7130 if (phba->sli_rev == LPFC_SLI_REV4)
7131 lpfc_sli4_unreg_all_rpis(vport);
7132
7133 lpfc_mbx_unreg_vpi(vport);
7134 spin_lock_irq(shost->host_lock);
7135 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7136 if (phba->sli_rev == LPFC_SLI_REV4)
7137 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
7138 else
7139 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
7140 spin_unlock_irq(shost->host_lock);
7141 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7142 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7143 /*
7144 * Driver needs to re-reg VPI in order for f/w
7145 * to update the MAC address.
7146 */
7147 lpfc_register_new_vport(phba, vport, ndlp);
7148 goto out;
7149 }
7150
7151 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
7152 lpfc_issue_init_vpi(vport);
7153 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
7154 lpfc_register_new_vport(phba, vport, ndlp);
7155 else
7156 lpfc_do_scr_ns_plogi(phba, vport);
7157 goto out;
7158 fdisc_failed:
7159 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7160 /* Cancel discovery timer */
7161 lpfc_can_disctmo(vport);
7162 lpfc_nlp_put(ndlp);
7163 out:
7164 lpfc_els_free_iocb(phba, cmdiocb);
7165 }
7166
7167 /**
7168 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
7169 * @vport: pointer to a virtual N_Port data structure.
7170 * @ndlp: pointer to a node-list data structure.
7171 * @retry: number of retries to the command IOCB.
7172 *
7173 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
7174 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
7175 * routine to issue the IOCB, which makes sure only one outstanding fabric
7176 * IOCB will be sent off HBA at any given time.
7177 *
7178 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7179 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7180 * will be stored into the context1 field of the IOCB for the completion
7181 * callback function to the FDISC ELS command.
7182 *
7183 * Return code
7184 * 0 - Successfully issued fdisc iocb command
7185 * 1 - Failed to issue fdisc iocb command
7186 **/
7187 static int
7188 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7189 uint8_t retry)
7190 {
7191 struct lpfc_hba *phba = vport->phba;
7192 IOCB_t *icmd;
7193 struct lpfc_iocbq *elsiocb;
7194 struct serv_parm *sp;
7195 uint8_t *pcmd;
7196 uint16_t cmdsize;
7197 int did = ndlp->nlp_DID;
7198 int rc;
7199
7200 vport->port_state = LPFC_FDISC;
7201 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7202 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7203 ELS_CMD_FDISC);
7204 if (!elsiocb) {
7205 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7206 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7207 "0255 Issue FDISC: no IOCB\n");
7208 return 1;
7209 }
7210
7211 icmd = &elsiocb->iocb;
7212 icmd->un.elsreq64.myID = 0;
7213 icmd->un.elsreq64.fl = 1;
7214
7215 if ((phba->sli_rev == LPFC_SLI_REV4) &&
7216 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7217 LPFC_SLI_INTF_IF_TYPE_0)) {
7218 /* FDISC needs to be 1 for WQE VPI */
7219 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
7220 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1;
7221 /* Set the ulpContext to the vpi */
7222 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
7223 } else {
7224 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7225 icmd->ulpCt_h = 1;
7226 icmd->ulpCt_l = 0;
7227 }
7228
7229 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7230 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7231 pcmd += sizeof(uint32_t); /* CSP Word 1 */
7232 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7233 sp = (struct serv_parm *) pcmd;
7234 /* Setup CSPs accordingly for Fabric */
7235 sp->cmn.e_d_tov = 0;
7236 sp->cmn.w2.r_a_tov = 0;
7237 sp->cls1.classValid = 0;
7238 sp->cls2.seqDelivery = 1;
7239 sp->cls3.seqDelivery = 1;
7240
7241 pcmd += sizeof(uint32_t); /* CSP Word 2 */
7242 pcmd += sizeof(uint32_t); /* CSP Word 3 */
7243 pcmd += sizeof(uint32_t); /* CSP Word 4 */
7244 pcmd += sizeof(uint32_t); /* Port Name */
7245 memcpy(pcmd, &vport->fc_portname, 8);
7246 pcmd += sizeof(uint32_t); /* Node Name */
7247 pcmd += sizeof(uint32_t); /* Node Name */
7248 memcpy(pcmd, &vport->fc_nodename, 8);
7249
7250 lpfc_set_disctmo(vport);
7251
7252 phba->fc_stat.elsXmitFDISC++;
7253 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7254
7255 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7256 "Issue FDISC: did:x%x",
7257 did, 0, 0);
7258
7259 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7260 if (rc == IOCB_ERROR) {
7261 lpfc_els_free_iocb(phba, elsiocb);
7262 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7263 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7264 "0256 Issue FDISC: Cannot send IOCB\n");
7265 return 1;
7266 }
7267 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
7268 return 0;
7269 }
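
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * the FDISC payload built above is a 4-byte ELS command word followed by a
 * copy of the port's service parameters, with pcmd advanced word by word to
 * patch individual fields.  The guarded-out sketch shows that "command word
 * plus copied parameter block" layout in standalone C.  The ex_serv_parm
 * fields, the EX_CMD_FDISC value and all names are simplified placeholders,
 * not the real serv_parm definition, and wire byte order is glossed over.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ex_serv_parm {
	uint32_t csp[4];		/* common service parameters */
	uint8_t  port_name[8];
	uint8_t  node_name[8];
};

#define EX_CMD_FDISC 0x51000000u	/* example command word */

/* Lay out a payload: 4-byte command word, then the parameter block. */
static size_t ex_build_payload(uint8_t *buf, const struct ex_serv_parm *sp)
{
	uint32_t cmd = EX_CMD_FDISC;
	uint8_t *p = buf;

	memcpy(p, &cmd, sizeof(cmd));
	p += sizeof(cmd);
	memcpy(p, sp, sizeof(*sp));
	p += sizeof(*sp);
	return (size_t)(p - buf);
}

int main(void)
{
	struct ex_serv_parm sp = { .port_name = "PORT01" };
	uint8_t buf[sizeof(uint32_t) + sizeof(sp)];

	printf("payload is %zu bytes\n", ex_build_payload(buf, &sp));
	return 0;
}
#endif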
7270
7271 /**
7272 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
7273 * @phba: pointer to lpfc hba data structure.
7274 * @cmdiocb: pointer to lpfc command iocb data structure.
7275 * @rspiocb: pointer to lpfc response iocb data structure.
7276 *
7277 * This routine is the completion callback function to the issuing of a LOGO
7278 * ELS command off a vport. It frees the command IOCB and then decrement the
7279 * reference count held on ndlp for this completion function, indicating that
7280 * the reference to the ndlp is no long needed. Note that the
7281 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
7282 * callback function and an additional explicit ndlp reference decrementation
7283 * will trigger the actual release of the ndlp.
7284 **/
7285 static void
7286 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7287 struct lpfc_iocbq *rspiocb)
7288 {
7289 struct lpfc_vport *vport = cmdiocb->vport;
7290 IOCB_t *irsp;
7291 struct lpfc_nodelist *ndlp;
7292 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
7293
7294 irsp = &rspiocb->iocb;
7295 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7296 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
7297 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
7298
7299 lpfc_els_free_iocb(phba, cmdiocb);
7300 vport->unreg_vpi_cmpl = VPORT_ERROR;
7301
7302 /* Trigger the release of the ndlp after logo */
7303 lpfc_nlp_put(ndlp);
7304 }
7305
7306 /**
7307 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
7308 * @vport: pointer to a virtual N_Port data structure.
7309 * @ndlp: pointer to a node-list data structure.
7310 *
7311 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
7312 *
7313 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7314 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7315 * will be stored into the context1 field of the IOCB for the completion
7316 * callback function to the LOGO ELS command.
7317 *
7318 * Return codes
7319 * 0 - Successfully issued logo off the @vport
7320 * 1 - Failed to issue logo off the @vport
7321 **/
7322 int
7323 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7324 {
7325 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7326 struct lpfc_hba *phba = vport->phba;
7327 IOCB_t *icmd;
7328 struct lpfc_iocbq *elsiocb;
7329 uint8_t *pcmd;
7330 uint16_t cmdsize;
7331
7332 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
7333 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
7334 ELS_CMD_LOGO);
7335 if (!elsiocb)
7336 return 1;
7337
7338 icmd = &elsiocb->iocb;
7339 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7340 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
7341 pcmd += sizeof(uint32_t);
7342
7343 /* Fill in LOGO payload */
7344 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
7345 pcmd += sizeof(uint32_t);
7346 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
7347
7348 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7349 "Issue LOGO npiv did:x%x flg:x%x",
7350 ndlp->nlp_DID, ndlp->nlp_flag, 0);
7351
7352 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
7353 spin_lock_irq(shost->host_lock);
7354 ndlp->nlp_flag |= NLP_LOGO_SND;
7355 spin_unlock_irq(shost->host_lock);
7356 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7357 IOCB_ERROR) {
7358 spin_lock_irq(shost->host_lock);
7359 ndlp->nlp_flag &= ~NLP_LOGO_SND;
7360 spin_unlock_irq(shost->host_lock);
7361 lpfc_els_free_iocb(phba, elsiocb);
7362 return 1;
7363 }
7364 return 0;
7365 }
7366
7367 /**
7368 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
7369 * @ptr: holder for the timer function associated data.
7370 *
7371 * This routine is invoked by the fabric iocb block timer after
7372 * timeout. It posts the fabric iocb block timeout event by setting the
7373 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
7374 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
7375 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
7376 * posted event WORKER_FABRIC_BLOCK_TMO.
7377 **/
7378 void
7379 lpfc_fabric_block_timeout(unsigned long ptr)
7380 {
7381 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7382 unsigned long iflags;
7383 uint32_t tmo_posted;
7384
7385 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
7386 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
7387 if (!tmo_posted)
7388 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
7389 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
7390
7391 if (!tmo_posted)
7392 lpfc_worker_wake_up(phba);
7393 return;
7394 }
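
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * the timer handler above wakes the worker thread only when it is the one
 * that newly posts WORKER_FABRIC_BLOCK_TMO, so repeated expirations do not
 * generate redundant wakeups.  The guarded-out sketch shows that "set the
 * bit under a lock, wake only if it was not already set" idiom in
 * standalone C; all ex_* names are invented for the example.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define EX_EVT_BLOCK_TMO 0x01u

static uint32_t ex_events;
static pthread_mutex_t ex_evt_lock = PTHREAD_MUTEX_INITIALIZER;

static void ex_wake_worker(void)
{
	printf("worker woken\n");
}

/* Post the event; wake the worker only if we were the first to post it. */
static void ex_post_block_tmo(void)
{
	uint32_t already;

	pthread_mutex_lock(&ex_evt_lock);
	already = ex_events & EX_EVT_BLOCK_TMO;
	if (!already)
		ex_events |= EX_EVT_BLOCK_TMO;
	pthread_mutex_unlock(&ex_evt_lock);

	if (!already)
		ex_wake_worker();
}

int main(void)
{
	ex_post_block_tmo();	/* first post: wakes the worker */
	ex_post_block_tmo();	/* already posted: no second wakeup */
	return 0;
}
#endif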
7395
7396 /**
7397 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
7398 * @phba: pointer to lpfc hba data structure.
7399 *
7400 * This routine issues one fabric iocb from the driver internal list to
7401 * the HBA. It first checks whether it's ready to issue one fabric iocb to
7402 * the HBA (whether there is no outstanding fabric iocb). If so, it shall
7403 * remove one pending fabric iocb from the driver internal list and invokes
7404 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
7405 **/
7406 static void
7407 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
7408 {
7409 struct lpfc_iocbq *iocb;
7410 unsigned long iflags;
7411 int ret;
7412 IOCB_t *cmd;
7413
7414 repeat:
7415 iocb = NULL;
7416 spin_lock_irqsave(&phba->hbalock, iflags);
7417 /* Post any pending iocb to the SLI layer */
7418 if (atomic_read(&phba->fabric_iocb_count) == 0) {
7419 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
7420 list);
7421 if (iocb)
7422 /* Increment fabric iocb count to hold the position */
7423 atomic_inc(&phba->fabric_iocb_count);
7424 }
7425 spin_unlock_irqrestore(&phba->hbalock, iflags);
7426 if (iocb) {
7427 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7428 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7429 iocb->iocb_flag |= LPFC_IO_FABRIC;
7430
7431 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7432 "Fabric sched1: ste:x%x",
7433 iocb->vport->port_state, 0, 0);
7434
7435 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7436
7437 if (ret == IOCB_ERROR) {
7438 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7439 iocb->fabric_iocb_cmpl = NULL;
7440 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7441 cmd = &iocb->iocb;
7442 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
7443 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
7444 iocb->iocb_cmpl(phba, iocb, iocb);
7445
7446 atomic_dec(&phba->fabric_iocb_count);
7447 goto repeat;
7448 }
7449 }
7450
7451 return;
7452 }
7453
7454 /**
7455 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
7456 * @phba: pointer to lpfc hba data structure.
7457 *
7458 * This routine unblocks the issuing fabric iocb command. The function
7459 * will clear the fabric iocb block bit and then invoke the routine
7460 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
7461 * from the driver internal fabric iocb list.
7462 **/
7463 void
7464 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7465 {
7466 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7467
7468 lpfc_resume_fabric_iocbs(phba);
7469 return;
7470 }
7471
7472 /**
7473 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
7474 * @phba: pointer to lpfc hba data structure.
7475 *
7476 * This routine blocks the issuing fabric iocb for a specified amount of
7477 * time (currently 100 ms). This is done by set the fabric iocb block bit
7478 * and set up a timeout timer for 100ms. When the block bit is set, no more
7479 * fabric iocb will be issued out of the HBA.
7480 **/
7481 static void
7482 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7483 {
7484 int blocked;
7485
7486 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7487 /* Start a timer to unblock fabric iocbs after 100ms */
7488 if (!blocked)
7489 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);
7490
7491 return;
7492 }
7493
7494 /**
7495 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
7496 * @phba: pointer to lpfc hba data structure.
7497 * @cmdiocb: pointer to lpfc command iocb data structure.
7498 * @rspiocb: pointer to lpfc response iocb data structure.
7499 *
7500 * This routine is the callback function that is put to the fabric iocb's
7501 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
7502 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
7503 * function first restores and invokes the original iocb's callback function
7504 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
7505 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
7506 **/
7507 static void
7508 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7509 struct lpfc_iocbq *rspiocb)
7510 {
7511 struct ls_rjt stat;
7512
7513 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7514 BUG();
7515
7516 switch (rspiocb->iocb.ulpStatus) {
7517 case IOSTAT_NPORT_RJT:
7518 case IOSTAT_FABRIC_RJT:
7519 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7520 lpfc_block_fabric_iocbs(phba);
7521 }
7522 break;
7523
7524 case IOSTAT_NPORT_BSY:
7525 case IOSTAT_FABRIC_BSY:
7526 lpfc_block_fabric_iocbs(phba);
7527 break;
7528
7529 case IOSTAT_LS_RJT:
7530 stat.un.lsRjtError =
7531 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7532 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7533 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7534 lpfc_block_fabric_iocbs(phba);
7535 break;
7536 }
7537
7538 if (atomic_read(&phba->fabric_iocb_count) == 0)
7539 BUG();
7540
7541 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7542 cmdiocb->fabric_iocb_cmpl = NULL;
7543 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7544 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7545
7546 atomic_dec(&phba->fabric_iocb_count);
7547 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7548 /* Post any pending iocbs to HBA */
7549 lpfc_resume_fabric_iocbs(phba);
7550 }
7551 }
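
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * lpfc_cmpl_fabric_iocb() above is an interposed completion handler: the
 * issue path saved the caller's callback in fabric_iocb_cmpl and slid this
 * one in front, and the completion restores and chains to the original
 * before resuming the next queued command.  The guarded-out sketch shows
 * that save/restore-and-chain idiom in standalone C with invented names.
 */
#if 0	/* never compiled */
#include <stdio.h>

struct ex_req;
typedef void (*ex_done_fn)(struct ex_req *);

struct ex_req {
	ex_done_fn done;	/* active completion callback */
	ex_done_fn saved_done;	/* caller's original callback */
};

static void ex_caller_done(struct ex_req *r)
{
	(void)r;
	printf("caller's completion ran\n");
}

/* Interposed completion: restore the original handler, then chain to it. */
static void ex_wrapped_done(struct ex_req *r)
{
	r->done = r->saved_done;
	r->saved_done = NULL;
	r->done(r);
	/* ...this is where the next queued command would be resumed... */
}

/* Issue path: remember the caller's handler and slide ours in front. */
static void ex_issue(struct ex_req *r)
{
	r->saved_done = r->done;
	r->done = ex_wrapped_done;
}

int main(void)
{
	struct ex_req r = { .done = ex_caller_done, .saved_done = NULL };

	ex_issue(&r);
	r.done(&r);	/* simulate the hardware completion firing */
	return 0;
}
#endif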
7552
7553 /**
7554 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
7555 * @phba: pointer to lpfc hba data structure.
7556 * @iocb: pointer to lpfc command iocb data structure.
7557 *
7558 * This routine is used as the top-level API for issuing a fabric iocb command
7559 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
7560 * function makes sure that only one fabric bound iocb will be outstanding at
7561 * any given time. As such, this function will first check to see whether there
7562 * is already an outstanding fabric iocb on the wire. If so, it will put the
7563 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
7564 * issued later. Otherwise, it will issue the iocb on the wire and update the
7565 * fabric iocb count it indicate that there is one fabric iocb on the wire.
7566 *
7567 * Note that this implementation can potentially send fabric IOCBs out of
7568 * order. The problem is that the construction of the "ready" boolean does
7569 * not include the condition that the internal fabric IOCB list is empty. As
7570 * such, it is possible that a fabric IOCB issued by this routine might "jump"
7571 * ahead of the fabric IOCBs already on the internal list.
7572 *
7573 * Return code
7574 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
7575 * IOCB_ERROR - failed to issue fabric iocb
7576 **/
7577 static int
7578 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7579 {
7580 unsigned long iflags;
7581 int ready;
7582 int ret;
7583
7584 if (atomic_read(&phba->fabric_iocb_count) > 1)
7585 BUG();
7586
7587 spin_lock_irqsave(&phba->hbalock, iflags);
7588 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7589 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7590
7591 if (ready)
7592 /* Increment fabric iocb count to hold the position */
7593 atomic_inc(&phba->fabric_iocb_count);
7594 spin_unlock_irqrestore(&phba->hbalock, iflags);
7595 if (ready) {
7596 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7597 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7598 iocb->iocb_flag |= LPFC_IO_FABRIC;
7599
7600 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7601 "Fabric sched2: ste:x%x",
7602 iocb->vport->port_state, 0, 0);
7603
7604 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7605
7606 if (ret == IOCB_ERROR) {
7607 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7608 iocb->fabric_iocb_cmpl = NULL;
7609 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7610 atomic_dec(&phba->fabric_iocb_count);
7611 }
7612 } else {
7613 spin_lock_irqsave(&phba->hbalock, iflags);
7614 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7615 spin_unlock_irqrestore(&phba->hbalock, iflags);
7616 ret = IOCB_SUCCESS;
7617 }
7618 return ret;
7619 }
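
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * the kerneldoc above describes the single-outstanding-fabric-IOCB policy:
 * an atomic count claims the one "slot" and everything else is parked on an
 * internal list until the completion frees the slot.  The guarded-out
 * sketch shows that admit-or-defer pattern in single-threaded standalone C;
 * the ex_* names and the fixed-size ring are invented for the example.
 */
#if 0	/* never compiled */
#include <stdio.h>

#define EX_QLEN 8

static int ex_outstanding;		/* 0 or 1, like fabric_iocb_count */
static int ex_pending[EX_QLEN];		/* deferred command ids */
static int ex_head, ex_tail;

/* Issue now if the single slot is free, otherwise park it for later. */
static void ex_issue(int id)
{
	if (ex_outstanding == 0) {
		ex_outstanding = 1;
		printf("issued %d\n", id);
	} else {
		ex_pending[ex_tail++ % EX_QLEN] = id;
		printf("queued %d\n", id);
	}
}

/* Completion frees the slot and resumes the next pending command, if any. */
static void ex_complete(void)
{
	ex_outstanding = 0;
	if (ex_head != ex_tail) {
		int id = ex_pending[ex_head++ % EX_QLEN];

		ex_outstanding = 1;
		printf("resumed %d\n", id);
	}
}

int main(void)
{
	ex_issue(1);		/* slot free: issued */
	ex_issue(2);		/* slot busy: queued behind 1 */
	ex_complete();		/* 1 done: 2 resumed */
	ex_complete();		/* 2 done: nothing pending */
	return 0;
}
#endif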
7620
7621 /**
7622 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
7623 * @vport: pointer to a virtual N_Port data structure.
7624 *
7625 * This routine aborts all the IOCBs associated with a @vport from the
7626 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7627 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7628 * list, removes each IOCB associated with the @vport off the list, set the
7629 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
7630 * associated with the IOCB.
7631 **/
7632 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
7633 {
7634 LIST_HEAD(completions);
7635 struct lpfc_hba *phba = vport->phba;
7636 struct lpfc_iocbq *tmp_iocb, *piocb;
7637
7638 spin_lock_irq(&phba->hbalock);
7639 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7640 list) {
7641
7642 if (piocb->vport != vport)
7643 continue;
7644
7645 list_move_tail(&piocb->list, &completions);
7646 }
7647 spin_unlock_irq(&phba->hbalock);
7648
7649 /* Cancel all the IOCBs from the completions list */
7650 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7651 IOERR_SLI_ABORTED);
7652 }
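
/*
 * Editor's note (illustrative sketch only, not part of the lpfc driver):
 * the abort helpers here move matching IOCBs onto a local "completions"
 * list while holding hbalock and only run the cancellations after the lock
 * is dropped, so the callbacks never execute under the spinlock.  The
 * guarded-out sketch shows that detach-then-complete idiom in standalone C
 * with a pthread mutex and invented ex_* names.
 */
#if 0	/* never compiled */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct ex_cmd {
	int owner;			/* which "vport" queued it */
	struct ex_cmd *next;
};

static struct ex_cmd *ex_list;		/* shared pending list */
static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

static void ex_add(int owner)
{
	struct ex_cmd *c = malloc(sizeof(*c));

	c->owner = owner;
	pthread_mutex_lock(&ex_lock);
	c->next = ex_list;
	ex_list = c;
	pthread_mutex_unlock(&ex_lock);
}

/* Detach matching entries under the lock; cancel them after dropping it. */
static void ex_abort_owner(int owner)
{
	struct ex_cmd **pp, *c, *doomed = NULL;

	pthread_mutex_lock(&ex_lock);
	for (pp = &ex_list; (c = *pp) != NULL; ) {
		if (c->owner == owner) {
			*pp = c->next;
			c->next = doomed;
			doomed = c;
		} else {
			pp = &c->next;
		}
	}
	pthread_mutex_unlock(&ex_lock);

	while ((c = doomed) != NULL) {	/* "complete" outside the lock */
		doomed = c->next;
		printf("cancelled command of owner %d\n", c->owner);
		free(c);
	}
}

int main(void)
{
	ex_add(1);
	ex_add(2);
	ex_add(1);
	ex_abort_owner(1);	/* cancels both owner-1 entries */
	return 0;
}
#endif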
7653
7654 /**
7655 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
7656 * @ndlp: pointer to a node-list data structure.
7657 *
7658 * This routine aborts all the IOCBs associated with an @ndlp from the
7659 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7660 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7661 * list, removes each IOCB associated with the @ndlp off the list, set the
7662 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
7663 * associated with the IOCB.
7664 **/
7665 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7666 {
7667 LIST_HEAD(completions);
7668 struct lpfc_hba *phba = ndlp->phba;
7669 struct lpfc_iocbq *tmp_iocb, *piocb;
7670 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7671
7672 spin_lock_irq(&phba->hbalock);
7673 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7674 list) {
7675 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7676
7677 list_move_tail(&piocb->list, &completions);
7678 }
7679 }
7680 spin_unlock_irq(&phba->hbalock);
7681
7682 /* Cancel all the IOCBs from the completions list */
7683 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7684 IOERR_SLI_ABORTED);
7685 }
7686
7687 /**
7688 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
7689 * @phba: pointer to lpfc hba data structure.
7690 *
7691 * This routine aborts all the IOCBs currently on the driver internal
7692 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
7693 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
7694 * list, removes IOCBs off the list, set the status feild to
7695 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
7696 * the IOCB.
7697 **/
7698 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7699 {
7700 LIST_HEAD(completions);
7701
7702 spin_lock_irq(&phba->hbalock);
7703 list_splice_init(&phba->fabric_iocb_list, &completions);
7704 spin_unlock_irq(&phba->hbalock);
7705
7706 /* Cancel all the IOCBs from the completions list */
7707 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7708 IOERR_SLI_ABORTED);
7709 }
7710
7711 /**
7712 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
7713 * @vport: pointer to lpfc vport data structure.
7714 *
7715 * This routine is invoked by the vport cleanup for deletions and the cleanup
7716 * for an ndlp on removal.
7717 **/
7718 void
7719 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
7720 {
7721 struct lpfc_hba *phba = vport->phba;
7722 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7723 unsigned long iflag = 0;
7724
7725 spin_lock_irqsave(&phba->hbalock, iflag);
7726 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7727 list_for_each_entry_safe(sglq_entry, sglq_next,
7728 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7729 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
7730 sglq_entry->ndlp = NULL;
7731 }
7732 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7733 spin_unlock_irqrestore(&phba->hbalock, iflag);
7734 return;
7735 }
7736
7737 /**
7738 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
7739 * @phba: pointer to lpfc hba data structure.
7740 * @axri: pointer to the els xri abort wcqe structure.
7741 *
7742 * This routine is invoked by the worker thread to process a SLI4 slow-path
7743 * ELS aborted xri.
7744 **/
7745 void
7746 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7747 struct sli4_wcqe_xri_aborted *axri)
7748 {
7749 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7750 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7751
7752 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7753 unsigned long iflag = 0;
7754 struct lpfc_nodelist *ndlp;
7755 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7756
7757 spin_lock_irqsave(&phba->hbalock, iflag);
7758 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7759 list_for_each_entry_safe(sglq_entry, sglq_next,
7760 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7761 if (sglq_entry->sli4_xritag == xri) {
7762 list_del(&sglq_entry->list);
7763 ndlp = sglq_entry->ndlp;
7764 sglq_entry->ndlp = NULL;
7765 list_add_tail(&sglq_entry->list,
7766 &phba->sli4_hba.lpfc_sgl_list);
7767 sglq_entry->state = SGL_FREED;
7768 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7769 spin_unlock_irqrestore(&phba->hbalock, iflag);
7770 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
7771
7772 /* Check if TXQ queue needs to be serviced */
7773 if (pring->txq_cnt)
7774 lpfc_worker_wake_up(phba);
7775 return;
7776 }
7777 }
7778 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7779 sglq_entry = __lpfc_get_active_sglq(phba, xri);
7780 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7781 spin_unlock_irqrestore(&phba->hbalock, iflag);
7782 return;
7783 }
7784 sglq_entry->state = SGL_XRI_ABORTED;
7785 spin_unlock_irqrestore(&phba->hbalock, iflag);
7786 return;
7787 }