[SCSI] lpfc 8.1.12 : Update copyright year to 2007
drivers/scsi/lpfc/lpfc_hbadisc.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);

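/*
 * Abort all outstanding FCP I/O queued to the SCSI target (nlp_sid)
 * behind this remote port.
 */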
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	return;
}

/*
 * This function is called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	uint8_t *name;
	int warn_on = 0;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " for rport in dev_loss_tmo_callbk x%x\n",
			       rport->port_id);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	name = (uint8_t *)&ndlp->nlp_portname;
	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	if (phba->fc_flag & FC_UNLOADING)
		warn_on = 0;

	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(phba->fc_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		lpfc_nlp_put(ndlp);
		put_device(&rport->dev);
	}

	return;
}

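/*
 * Drain phba->work_list and dispatch each queued worker event; the
 * host lock is dropped while an individual event is being handled.
 */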
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);

}

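/*
 * Worker thread core: act on the latched host attention bits (error,
 * mailbox, link attention), run any timeout handlers that were
 * flagged, service deferred slow-path ring events, and finally
 * process the deferred event list.
 */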
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);

}

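/*
 * Wait condition for the worker thread: nonzero when there is work
 * pending or the thread is being asked to stop.
 */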
static int
check_work_wait_done(struct lpfc_hba *phba)
{
	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}

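/* Main loop of the lpfc worker thread. */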
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}

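/*
 * Bring the port logically link-down: notify the FC transport,
 * unregister the firmware's default RPIs, flush RSCN and ELS
 * activity, and push every node through DEVICE_RECOVERY.
 */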
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *mb;
	int rc;

	psli = &phba->sli;
	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;

		spin_lock_irq(phba->host->host_lock);
		phba->hba_state = LPFC_LINK_DOWN;
		spin_unlock_irq(phba->host->host_lock);
	}

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKDOWN, 0);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/*
	 * Issue a LINK DOWN event to all nodes.
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
		/* free any ndlp's on unused list */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(phba, ndlp);
		else	/* otherwise, force node recovery. */
			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}

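/*
 * Bring the port logically link-up: notify the FC transport, reset
 * the discovery flags, and clean up nodes left over from the last
 * time the link was up.
 */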
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKUP, 0);

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);


	if (phba->fc_flag & FC_LBIT) {
		list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/*
					 * On Linkup it's safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_set_state(phba, ndlp,
							   NLP_STE_UNUSED_NODE);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/*
					 * Fail outstanding IO now since
					 * device is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
				 nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(phba, ndlp);
	}

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}

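/*
 * Completion handler for the CONFIG_LINK mailbox command. On
 * success, either wait for FAN (public loop with the login bit
 * clear) or kick off discovery with an initial FLOGI.
 */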
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	    !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}

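/*
 * Completion handler for READ_SPARAM: save the service parameters,
 * apply any configured soft WWNN/WWPN, and record the node and port
 * names.
 */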
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;


	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x>\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) &phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy((uint8_t *) &phba->fc_nodename,
	       (uint8_t *) &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) &phba->fc_portname,
	       (uint8_t *) &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->extra_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}

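/*
 * Act on a link-up attention: record link speed and topology, pick
 * up the loop map (if any), then issue READ_SPARAM and CONFIG_LINK.
 */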
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			return;
		}
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
}

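/*
 * Act on a link-down attention: run the common link-down handling
 * and re-enable link attention interrupts.
 */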
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->fc_flag & FC_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1306 Link Up Event in loop back mode "
				"x%x received Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_nlp_put(ndlp);

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;


	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_put(ndlp);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login.  Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (!ndlp) {
			/* Allocate a new node instance.  If the pool is empty,
			 * start the discovery process and skip the Nameserver
			 * login process.  This is attempted again later on.
			 * Otherwise, issue a Port Login (PLOGI) to NameServer.
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
			if (!ndlp) {
				lpfc_disc_start(phba);
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			} else {
				lpfc_nlp_init(phba, ndlp, NameServer_DID);
				ndlp->nlp_type |= NLP_FABRIC;
			}
		}
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(phba, NameServer_DID, 0);
		if (phba->cfg_fdmi_on) {
			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
						  GFP_KERNEL);
			if (ndlp_fdmi) {
				lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
				ndlp_fdmi->nlp_type |= NLP_FABRIC;
				ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_issue_els_plogi(phba, FDMI_DID, 0);
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_drop_node(phba, ndlp);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

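/*
 * Register (or re-register) this node with the FC transport as a
 * remote port, set its roles, and note its SCSI target id.
 */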
static void
lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
		lpfc_nlp_put(ndlp);
	}
	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}

	return;
}

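/*
 * Remove this node's remote port from the FC transport; the node
 * reference is dropped here only if no SCSI target id was ever
 * assigned to the rport.
 */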
static void
lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	if (rport->scsi_target_id == -1) {
		ndlp->rport = NULL;
		rdata->pnode = NULL;
		lpfc_nlp_put(ndlp);
		put_device(&rport->dev);
	}

	fc_remote_port_delete(rport);

	return;
}

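/* Adjust the per-state node counts under the host lock. */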
static void
lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
{
	spin_lock_irq(phba->host->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		phba->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		phba->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		phba->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		phba->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		phba->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		phba->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		phba->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		phba->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(phba->host->host_lock);
}

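/*
 * Reconcile node flags and the FC transport registration with a
 * node's discovery state transition.
 */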
static void
lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		phba->nport_event_cnt++;
		lpfc_unregister_remote_port(phba, ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(phba, ndlp);
	}

	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}

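/* Return a printable name for a node state, for log messages. */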
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < ARRAY_SIZE(states) && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}

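/*
 * Move a node to a new discovery state, keeping the per-state
 * counters and the transport registration in step.
 */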
void
lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
{
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0904 NPort state transition x%06x, %s -> %s\n",
			phba->brd_no,
			ndlp->nlp_DID,
			lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			lpfc_nlp_state_name(name2, sizeof(name2), state));
	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(phba->host->host_lock);
		list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes);
		spin_unlock_irq(phba->host->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(phba, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(phba, state, 1);
	lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
}

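/* Remove a node from the node list and update the state counters. */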
void
lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	spin_lock_irq(phba->host->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
}

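/* Remove a node from the node list and drop a node reference. */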
void
lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	spin_lock_irq(phba->host->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_put(ndlp);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba *phba)
{
	uint32_t tmo;

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout; FC spec states we need 3 * ratov for
		 * CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba *phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so dequeue and call
					   compl with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(phba->host->host_lock);

		}
	}

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mb;
	LPFC_MBOXQ_t *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_dequeue_node(phba, ndlp);

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			lpfc_nlp_put(ndlp);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_els_abort(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}

	lpfc_cleanup_node(phba, ndlp);

	if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
		put_device(&ndlp->rport->dev);
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}

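/*
 * Match a node against a DID. Besides a direct match, this handles
 * the case where either DID carries a zero area/domain, treating it
 * as an alias for our own area/domain.
 */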
static int
lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(phba, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
					"%d:0929 FIND node DID "
					" Data: x%p x%x x%x x%x\n",
					phba->brd_no,
					ndlp, ndlp->nlp_DID,
					ndlp->nlp_flag, data1);
			spin_unlock_irq(phba->host->host_lock);
			return ndlp;
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND.\n",
			phba->brd_no, did);
	return NULL;
}

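/*
 * Find or create the node for a DID and mark it for discovery.
 * While in RSCN mode, only DIDs named in the RSCN payload qualify.
 */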
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba *phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(phba, did);
	if (!ndlp) {
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(phba, ndlp, did);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else
			ndlp = NULL;
	} else {
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
			return NULL;
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba *phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}

			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}

/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    did_changed) {
			spin_lock_irq(phba->host->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->extra_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}

/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the
 * ELS ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq.
	 * First check the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp)
			continue;

		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp)
			continue;

		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX))
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	}
	spin_unlock_irq(phba->host->host_lock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	return;
}

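/*
 * Flush the discovery lists: for every node still in PLOGI_ISSUE or
 * ADISC_ISSUE state, fail its outstanding ELS traffic via lpfc_free_tx()
 * and drop a reference on the node.
 */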
void
lpfc_disc_flush_list(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
				lpfc_nlp_put(ndlp);
			}
		}
	}
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}

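/*
 * Worker-side handler for the discovery timeout. lpfc_disc_timeout()
 * above runs in timer context and only sets WORKER_DISC_TMO and wakes
 * the worker; this routine does the actual recovery, dispatching on the
 * hba_state in which the timeout occurred.
 */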
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
		/* hba_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN.
		 */
		/* FAN timeout */
		lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(phba, ndlp);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
		/* hba_state is identically LPFC_FLOGI while waiting for
		 * FLOGI cmpl.
		 */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
		/* hba_state is identically LPFC_FABRIC_CFG_LINK while
		 * waiting for NameServer login.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (ndlp)
			lpfc_nlp_put(ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0206 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0207 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox command for the
 * management server (FDMI) upon completion. It is setup in the
 * LPFC_MBOXQ as the completion routine when the command is handed off
 * to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	/* Mailbox took a reference to the node */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

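/*
 * Match functions for __lpfc_find_node(). Each returns nonzero when
 * @ndlp matches the key passed in @param.
 */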
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

/*
 * Search the node list for a remote port matching the filter criteria.
 * The caller must hold host_lock before calling this routine.
 */
struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
		    filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
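
/*
 * Usage sketch (illustrative only): with host_lock already held, a
 * lookup by RPI reduces to
 *
 *	ndlp = __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
 *
 * which is exactly how __lpfc_findnode_rpi() below is implemented.
 */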

/*
 * Search the node list for a remote port matching the filter criteria.
 * This routine is used when the caller does NOT hold host_lock; the
 * lock is taken and released around the search.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_find_node(phba, filter, param);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}

/*
 * Look up the node list for the given RPI. If the RPI is found, the
 * routine returns the nodelist pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
{
	return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
}

struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_findnode_rpi(phba, rpi);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}

/*
 * Look up the node list for the given WWPN. If the WWPN is found, the
 * routine returns the nodelist pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}

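/*
 * Initialize a freshly allocated nodelist entry: zero it, set up the
 * ELS retry delay timer, record the DID and owning HBA, and take the
 * initial reference with kref_init().
 */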
void
lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
{
	memset(ndlp, 0, sizeof(struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	kref_init(&ndlp->kref);
	return;
}

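/*
 * kref release callback, invoked when the last reference to a node is
 * dropped: remove the node from the discovery machinery and free it
 * back to the nodelist mempool.
 */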
void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);
	lpfc_nlp_remove(ndlp->nlp_phba, ndlp);
	mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool);
}

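/*
 * Reference counting sketch (illustrative only): code that stashes a
 * node pointer should bracket its use as
 *
 *	ndlp = lpfc_nlp_get(ndlp);
 *	if (ndlp) {
 *		... use ndlp ...
 *		lpfc_nlp_put(ndlp);
 *	}
 *
 * The final put invokes lpfc_nlp_release() above. Both helpers accept
 * NULL, so callers need not check first.
 */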
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	if (ndlp)
		kref_get(&ndlp->kref);
	return ndlp;
}

int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}