/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);
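
/*
 * Called by the FC transport to terminate any outstanding FCP I/O to
 * the target behind this remote port: IOCBs queued on the FCP ring for
 * the node's SCSI ID are aborted under the host lock.
 */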
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	return;
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	uint8_t *name;
	int warn_on = 0;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " for rport in dev_loss_tmo_callbk x%x\n",
			       rport->port_id);
		return;
	}

	name = (uint8_t *)&ndlp->nlp_portname;
	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	if (phba->fc_flag & FC_UNLOADING)
		warn_on = 0;

	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(phba->fc_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC))
		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}

	return;
}
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
}
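
/*
 * Main body of the worker thread: handle latched host attention
 * conditions (error, mailbox, link), run any timeout handlers flagged
 * in work_hba_events, and service slow-path ring events.
 */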
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);
}
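
/*
 * Predicate for the worker thread's wait queue: returns 1 when host
 * attention, an hba event, a queued work item, or a stop request is
 * pending.
 */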
static int
check_work_wait_done(struct lpfc_hba *phba) {

	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}
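
/*
 * Worker thread entry point. Sleeps until check_work_wait_done()
 * indicates there is work, then calls lpfc_work_done() until the
 * thread is asked to stop.
 */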
static int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
						check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}
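
/*
 * Bring the link state machine down: clean up default RPIs and pending
 * RSCN/ELS activity, post a LINK DOWN event to every node list, and
 * reset pt2pt addressing.
 */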
static int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli       *psli;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	LPFC_MBOXQ_t     *mb;
	int               rc, i;

	psli = &phba->sli;
	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;
	}
	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_DOWN;
	spin_unlock_irq(phba->host->host_lock);

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKDOWN, 0);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {

			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}
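
/*
 * Bring the link state machine up: post a LINK UP event, reset the
 * discovery flags, and walk the node lists cleaning up fabric nodes or
 * failing I/O for nodes that must PLOGI again.
 */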
static int
lpfc_linkup(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct list_head *listp, *node_list[7];
	int i;

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKUP, 0);

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);

	node_list[0] = &phba->fc_plogi_list;
	node_list[1] = &phba->fc_adisc_list;
	node_list[2] = &phba->fc_reglogin_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_nlpunmap_list;
	node_list[5] = &phba->fc_nlpmap_list;
	node_list[6] = &phba->fc_npr_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			if (phba->fc_flag & FC_LBIT) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/* On Linkup its safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_list(phba, ndlp,
						      NLP_UNUSED_LIST);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/* Fail outstanding IO now since device
					 * is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}
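
/*
 * Completion handler for the CONFIG_LINK mailbox command. On public
 * loop we wait for FAN using the discovery timer; otherwise discovery
 * starts with an initial FLOGI.
 */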
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	    !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}
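
/*
 * Completion handler for the READ_SPARAM mailbox command: copy the
 * service parameters into the hba, applying any soft WWNN/WWPN
 * overrides, or fall back to CLEAR_LA on error.
 */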
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x>\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy((uint8_t *) & phba->fc_nodename,
	       (uint8_t *) & phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) & phba->fc_portname,
	       (uint8_t *) & phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->extra_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}
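
/*
 * Process a link-up attention: record link speed and topology, pick up
 * loop addressing on FC-AL (optionally logging the ALPA map), then
 * issue READ_SPARAM and CONFIG_LINK mailbox commands to continue
 * bring-up.
 */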
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox,
					     phba->mbox_mem_pool);
			return;
		}
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
}
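
/*
 * Drive the link down and re-enable Link Attention interrupts so the
 * next attention can be processed; no CLEAR_LA is needed here.
 */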
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		mempool_free(ndlp, phba->nlp_mem_pool);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login.  Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (!ndlp) {
			/* Allocate a new node instance. If the pool is empty,
			 * start the discovery process and skip the Nameserver
			 * login process.  This is attempted again later on.
			 * Otherwise, issue a Port Login (PLOGI) to NameServer.
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
			if (!ndlp) {
				lpfc_disc_start(phba);
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			} else {
				lpfc_nlp_init(phba, ndlp, NameServer_DID);
				ndlp->nlp_type |= NLP_FABRIC;
			}
		}
		ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
		lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
		lpfc_issue_els_plogi(phba, NameServer_DID, 0);
		if (phba->cfg_fdmi_on) {
			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
						  GFP_KERNEL);
			if (ndlp_fdmi) {
				lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
				ndlp_fdmi->nlp_type |= NLP_FABRIC;
				ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_issue_els_plogi(phba, FDMI_DID, 0);
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
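
/*
 * Register this node with the FC transport as a remote port and record
 * its roles and SCSI target id.
 */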
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}

	return;
}
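
/*
 * Remove this node's remote port from the FC transport, breaking the
 * rport/node linkage first if no SCSI target id was ever assigned.
 */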
static void
lpfc_unregister_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	if (rport->scsi_target_id == -1) {
		ndlp->rport = NULL;
		rdata->pnode = NULL;
	}

	fc_remote_port_delete(rport);

	return;
}
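
/*
 * Move a node between the driver's nodelists: take it off its current
 * list, adjust the per-list counters, add it to the requested list, and
 * make any FC transport add/remove upcalls implied by the move.
 */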
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli *psli;

	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
		if (list != NLP_NO_LIST)
			return 0;

	spin_lock_irq(phba->host->host_lock);
	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		    (list != NLP_NPR_LIST)) {
			spin_unlock_irq(phba->host->host_lock);
			lpfc_cancel_retry_delay_tmo(phba, nlp);
			spin_lock_irq(phba->host->host_lock);
		}
		break;
	}

	nlp->nlp_flag &= ~NLP_LIST_MASK;

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_remove(phba, nlp);
		spin_lock_irq(phba->host->host_lock);
		/* as node removed - stop further transport calls */
		rport_del = none;
		break;
	case NLP_UNUSED_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		nlp->nlp_flag |= list;
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		nlp->nlp_flag |= list;
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		break;
	case NLP_NPR_LIST:
		nlp->nlp_flag |= list;
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		break;
	case NLP_JUST_DQ:
		break;
	}

	spin_unlock_irq(phba->host->host_lock);

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			lpfc_unregister_remote_port(phba, nlp);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if ((!nlp->rport) || (nlp->rport->port_state ==
					      FC_PORTSTATE_BLOCKED))
				lpfc_register_remote_port(phba, nlp);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return 0;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout. FC spec states we need 3 * ratov for
		 * CT requests.
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}

		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so dequeue and call compl
					   with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						icmd = &iocb->iocb;
						icmd->ulpStatus =
						    IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
						    IOERR_SLI_ABORTED;
						spin_unlock_irq(phba->host->
								host_lock);
						(iocb->iocb_cmpl) (phba,
								   iocb, iocb);
						spin_lock_irq(phba->host->
							      host_lock);
					} else
						lpfc_sli_release_iocbq(phba,
								       iocb);
				}
			}
			spin_unlock_irq(phba->host->host_lock);
		}
	}
	return 0;
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
			    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t       *mb;
	LPFC_MBOXQ_t       *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_els_abort(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_rport_data *rdata;

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}

	if (ndlp->nlp_disc_refcnt) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		lpfc_freenode(phba, ndlp);

		if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
			rdata = ndlp->rport->dd_data;
			rdata->pnode = NULL;
			ndlp->rport = NULL;
		}

		mempool_free(ndlp, phba->nlp_mem_pool);
	}
	return 0;
}
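
/*
 * Match a node against a DID, allowing for the area/domain wildcarding
 * used while the local address is not yet fully assigned.
 */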
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry on a specific list */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[]={&phba->fc_nlpunmap_list,
				   &phba->fc_nlpmap_list,
				   &phba->fc_plogi_list,
				   &phba->fc_adisc_list,
				   &phba->fc_reglogin_list,
				   &phba->fc_prli_list,
				   &phba->fc_npr_list,
				   &phba->fc_unused_list};
	uint32_t search[]={NLP_SEARCH_UNMAPPED,
			   NLP_SEARCH_MAPPED,
			   NLP_SEARCH_PLOGI,
			   NLP_SEARCH_ADISC,
			   NLP_SEARCH_REGLOGIN,
			   NLP_SEARCH_PRLI,
			   NLP_SEARCH_NPR,
			   NLP_SEARCH_UNUSED};
	int i;
	uint32_t data1;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
		if (!(order & search[i]))
			continue;
		list_for_each_entry(ndlp, lists[i], nlp_listp) {
			if (lpfc_matchdid(phba, ndlp, did)) {
				data1 = (((uint32_t) ndlp->nlp_state << 24) |
					 ((uint32_t) ndlp->nlp_xri << 16) |
					 ((uint32_t) ndlp->nlp_type << 8) |
					 ((uint32_t) ndlp->nlp_rpi & 0xff));
				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
						"%d:0929 FIND node DID "
						" Data: x%p x%x x%x x%x\n",
						phba->brd_no,
						ndlp, ndlp->nlp_DID,
						ndlp->nlp_flag, data1);
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
			phba->brd_no, did, order);
	return NULL;
}
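
/*
 * Find or allocate the node for a DID that should be discovered and
 * mark it NLP_NPR_2B_DISC; under RSCN processing, only DIDs matching
 * the RSCN payload are set up.
 */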
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t flg;

	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
	if (!ndlp) {
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(phba, ndlp, did);
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else
			ndlp = NULL;
	} else {
		flg = ndlp->nlp_flag & NLP_LIST_MASK;
		if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
			return NULL;
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba * phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}

			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}

/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t did_changed, num_sent;
	uint32_t clear_la_pending;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			if (did_changed) {
				spin_lock_irq(phba->host->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(phba->host->host_lock);
			}
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->extra_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}
1949 * ring the match the sppecified nodelist.
1952 lpfc_free_tx(struct lpfc_hba
* phba
, struct lpfc_nodelist
* ndlp
)
1954 struct lpfc_sli
*psli
;
1956 struct lpfc_iocbq
*iocb
, *next_iocb
;
1957 struct lpfc_sli_ring
*pring
;
1958 struct lpfc_dmabuf
*mp
;
1961 pring
= &psli
->ring
[LPFC_ELS_RING
];
1963 /* Error matching iocb on txq or txcmplq
1964 * First check the txq.
1966 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txq
, list
) {
1967 if (iocb
->context1
!= ndlp
) {
1971 if ((icmd
->ulpCommand
== CMD_ELS_REQUEST64_CR
) ||
1972 (icmd
->ulpCommand
== CMD_XMIT_ELS_RSP64_CX
)) {
1974 list_del(&iocb
->list
);
1976 lpfc_els_free_iocb(phba
, iocb
);
1980 /* Next check the txcmplq */
1981 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
) {
1982 if (iocb
->context1
!= ndlp
) {
1986 if ((icmd
->ulpCommand
== CMD_ELS_REQUEST64_CR
) ||
1987 (icmd
->ulpCommand
== CMD_XMIT_ELS_RSP64_CX
)) {
1989 iocb
->iocb_cmpl
= NULL
;
1990 /* context2 = cmd, context2->next = rsp, context3 =
1992 if (iocb
->context2
) {
1993 /* Free the response IOCB before handling the
1996 mp
= (struct lpfc_dmabuf
*) (iocb
->context2
);
1997 mp
= list_get_first(&mp
->list
,
2001 /* Delay before releasing rsp buffer to
2002 * give UNREG mbox a chance to take
2006 &phba
->freebufList
);
2008 lpfc_mbuf_free(phba
,
2009 ((struct lpfc_dmabuf
*)
2010 iocb
->context2
)->virt
,
2011 ((struct lpfc_dmabuf
*)
2012 iocb
->context2
)->phys
);
2013 kfree(iocb
->context2
);
2016 if (iocb
->context3
) {
2017 lpfc_mbuf_free(phba
,
2018 ((struct lpfc_dmabuf
*)
2019 iocb
->context3
)->virt
,
2020 ((struct lpfc_dmabuf
*)
2021 iocb
->context3
)->phys
);
2022 kfree(iocb
->context3
);
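
/*
 * Flush discovery: free pending ELS traffic for, and then remove, every
 * node still on the PLOGI and ADISC lists.
 */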
void
lpfc_disc_flush_list(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	if (phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
					 nlp_listp) {
			lpfc_free_tx(phba, ndlp);
			lpfc_nlp_remove(phba, ndlp);
		}
	}
	return;
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}
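
/*
 * Worker-thread half of the discovery timeout: recover discovery based
 * on the hba state in which the timer expired (FAN wait, FLOGI,
 * NameServer login/query, authentication, CLEAR_LA or RSCN).
 */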
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
					 nlp_listp) {
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
		if (ndlp)
			lpfc_nlp_remove(phba, ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
					 NameServer_DID);
		if (ndlp) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0206 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0207 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}

/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine looks up the ndlp lists for the given RPI.
 * If the RPI is found, return the node list pointer;
 * else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[]={&phba->fc_nlpunmap_list,
				   &phba->fc_nlpmap_list,
				   &phba->fc_plogi_list,
				   &phba->fc_adisc_list,
				   &phba->fc_reglogin_list};
	int i;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++ )
		list_for_each_entry(ndlp, lists[i], nlp_listp)
			if (ndlp->nlp_rpi == rpi) {
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
	spin_unlock_irq(phba->host->host_lock);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given WWPN.
 * If the WWPN is found, return the node list pointer;
 * else return NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
		   struct lpfc_name * wwpn)
{
	struct lpfc_nodelist *ndlp;
	struct list_head *lists[]={&phba->fc_nlpunmap_list,
				   &phba->fc_nlpmap_list,
				   &phba->fc_npr_list,
				   &phba->fc_plogi_list,
				   &phba->fc_adisc_list,
				   &phba->fc_reglogin_list,
				   &phba->fc_prli_list};
	uint32_t search[]={NLP_SEARCH_UNMAPPED,
			   NLP_SEARCH_MAPPED,
			   NLP_SEARCH_NPR,
			   NLP_SEARCH_PLOGI,
			   NLP_SEARCH_ADISC,
			   NLP_SEARCH_REGLOGIN,
			   NLP_SEARCH_PRLI};
	int i;

	spin_lock_irq(phba->host->host_lock);
	for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
		if (!(order & search[i]))
			continue;
		list_for_each_entry(ndlp, lists[i], nlp_listp) {
			if (memcmp(&ndlp->nlp_portname, wwpn,
				   sizeof(struct lpfc_name)) == 0) {
				spin_unlock_irq(phba->host->host_lock);
				return ndlp;
			}
		}
	}
	spin_unlock_irq(phba->host->host_lock);
	return NULL;
}
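
/*
 * Initialize a freshly allocated nodelist entry for the given DID.
 */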
void
lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	return;
}