/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
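/*
 * Note: BFA_DBG_FWTRC_OFF(fn) yields the per-PCI-function firmware trace
 * offset in SMEM -- BFI_IOC_TRC_OFF plus one BFA_DBG_FWTRC_LEN sized slot
 * per function.
 */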
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc)		((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc)		((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
	((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))
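/*
 * The mailbox is treated as busy if host commands are still queued or the
 * h/w mailbox command register has not yet been cleared by firmware.
 */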
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
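/* Module-wide default; may be overridden via bfa_ioc_auto_recover(). */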
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/* Acquiring address		*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
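/*
 * The table maps each state-machine handler to its externally visible
 * bfa_ioc_state, resolved via bfa_sm_to_state() in bfa_ioc_get_state().
 */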
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
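/*
 * Note: the iocpf timers above reuse ioc_timer, while the semaphore retry
 * poll runs on the separate sem_timer.
 */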
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
 * Beginning state. IOC uninit state.
 */
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)

/*
 * IOC is in uninit state.
 */
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		bfa_sm_fault(ioc, event);

/*
 * Reset entry actions -- initialize state machine
 */
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);

/*
 * IOC is in reset state.
 */
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		bfa_ioc_disable_comp(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_sm_fault(ioc, event);
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		/* !!! fall through !!! */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);

		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);

/*
 * IOC configuration in progress. Timer is active.
 */
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);

		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);

		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_sm_fault(ioc, event);
/*
 * Acquiring address from fabric (entry function)
 */
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)

/*
 * Acquiring address from the fabric
 */
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);

		bfa_hb_timer_stop(ioc);
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);

		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);

bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);

/*
 * IOC is being disabled
 */
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);

		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);

		bfa_sm_fault(ioc, event);
/*
 * IOC disable completion entry.
 */
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
	bfa_ioc_disable_comp(ioc);

bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);

		ioc->cbfn->disable_cbfn(ioc->bfa);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)

/*
 * Hardware initialization retry.
 */
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		/* Initialization retry failed. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);

		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)

bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);

		/* HB failure notification, ignore. */

		bfa_sm_fault(ioc, event);
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)

bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		ioc->cbfn->disable_cbfn(ioc->bfa);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		bfa_sm_fault(ioc, event);
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;

/*
 * Beginning state. IOC is in reset state.
 */
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);

		bfa_sm_fault(ioc, event);
/*
 * Semaphore should be acquired for version check.
 */
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
	struct bfi_ioc_image_hdr_s fwhdr;
	u32 r32, fwstate, pgnum, pgoff, loff = 0;

	/* Spin on init semaphore to serialize. */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);

	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/* unlock init semaphore. */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_hw_sem_get(iocpf->ioc);
/*
 * Awaiting h/w semaphore to continue with version check.
 */
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);

		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);
/*
 * Notify enable completion callback.
 */
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
	/* Call only the first time sm enters fwmismatch state. */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);

/*
 * Awaiting firmware version match.
 */
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);

		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);
/*
 * Request for semaphore.
 */
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);
/*
 * Awaiting semaphore for h/w initialization.
 */
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);

bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);

/*
 * IOC is being disabled
 */
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

	case IOCPF_E_FWRSP_ENABLE:

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);

/*
 * IOC hb ack request is being removed.
 */
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

		bfa_sm_fault(ioc, event);
/*
 * IOC disable completion entry.
 */
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);

bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);

/*
 * Hardware initialization failed.
 */
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
	bfa_trc(iocpf->ioc, 0);

/*
 * Hardware initialization failed.
 */
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);

bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		bfa_sm_fault(ioc, event);
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
	bfa_trc(iocpf->ioc, 0);

/*
 * IOC is in failed state.
 */
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_sm_fault(ioc, event);
/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
	struct bfa_ioc_notify_s *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);

bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
bfa_ioc_sem_get(void __iomem *sem_reg)
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		r32 = readl(sem_reg);
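/*
 * Bit 0 of the semaphore register being set means the semaphore is already
 * held; the loop above gives up after BFA_SEM_SPINCNT polls.
 */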
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);

	bfa_sem_timer_start(ioc);
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
/*
 * Get driver and firmware versions.
 */
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
/*
 * Returns TRUE if same.
 */
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
	struct bfi_ioc_image_hdr_s *drv_fwhdr;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);

	bfa_trc(ioc, fwhdr->md5sum[0]);
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
/*
 * Conditionally flush any pending message from firmware at start.
 */
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
bfa_ioc_timeout(void *ioc_arg)
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);

bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
	u32 *msgp = (u32 *) ioc_msg;

	bfa_trc(ioc, msgp[0]);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));

bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));

bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
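/*
 * attr_addr carries the DMA address claimed in bfa_ioc_mem_claim(); firmware
 * fills the IOC attributes there and replies with BFI_IOC_I2H_GETATTR_REPLY.
 */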
bfa_ioc_hb_check(void *cbarg)
	struct bfa_ioc_s *ioc = cbarg;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
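/*
 * bfa_ioc_hb_check() runs every BFA_IOC_HB_TOV msecs: if the firmware
 * heartbeat counter has not advanced since the last poll the IOC is
 * recovered, otherwise pending mailbox commands are polled and the
 * heartbeat timer is re-armed.
 */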
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
/*
 * Initiate a full firmware download.
 */
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));

		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
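/*
 * After the image is copied into SMEM, the device mode, boot type and boot
 * environment are written (byte-swapped) at their fixed SMEM offsets for
 * firmware to pick up at boot.
 */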
/*
 * Update BFA configuration from firmware configuration.
 */
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
	attr->card_type = be32_to_cpu(attr->card_type);
	attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
/*
 * Attach time initialization of mbox logic.
 */
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/*
 * Cleanup any pending requests.
 */
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		return BFA_STATUS_FAILED;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		return BFA_STATUS_FAILED;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
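/*
 * Boot sequence above: PLL init, set the fwstate of both functions, flush
 * any stale firmware message, download the firmware image and finally take
 * the LPU out of reset.
 */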
/*
 * Enable/disable IOC failure auto recovery.
 */
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
	bfa_auto_recover = auto_recover;

bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);

bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
	__be32	*msgp = mbmsg;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);

	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:

	case BFI_IOC_I2H_ENABLE_REPLY:
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);

	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);

		bfa_trc(ioc, msg->mh.msg_id);
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
/*
 * Driver detach time IOC cleanup.
 */
bfa_ioc_detach(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
	INIT_LIST_HEAD(&ioc->notify_q);
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_pcifn_class clscode)
	ioc->clscode = clscode;
	ioc->pcidev = *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case BFA_PCI_DEVICE_ID_FC_8G1P:
	case BFA_PCI_DEVICE_ID_FC_8G2P:
		ioc->asic_gen = BFI_ASIC_GEN_CB;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;

	case BFA_PCI_DEVICE_ID_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;

	case BFA_PCI_DEVICE_ID_CT_FC:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode = BFI_ASIC_MODE_FC16;
			ioc->fcmode = BFA_TRUE;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
		bfa_ioc_set_cb_hwif(ioc);
	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_ioc_set_ct_hwif(ioc);
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_ioc_set_ct2_hwif(ioc);
		bfa_ioc_ct2_poweron(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
bfa_ioc_enable(struct bfa_ioc_s *ioc)
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);

bfa_ioc_disable(struct bfa_ioc_s *ioc)
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
/*
 * Register mailbox message handler function, to be called by common modules
 */
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
		list_add_tail(&cmd->qe, &mod->cmd_q);

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/*
 * Handle mailbox interrupts
 */
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);

		if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
/*
 * return true if IOC is disabled
 */
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);

/*
 * Return TRUE if IOC is in acquiring address state
 */
bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);

/*
 * return true if IOC firmware is different.
 */
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
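/*
 * Any of the firmware states above counts as "disabled" for the
 * adapter-level check in bfa_ioc_adapter_is_disabled().
 */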
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled state.
 */
*ioc
)
2489 if (!bfa_fsm_cmp_state(ioc
, bfa_ioc_sm_disabled
))
2492 ioc_state
= readl(ioc
->ioc_regs
.ioc_fwstate
);
2493 if (!bfa_ioc_state_disabled(ioc_state
))
2496 if (ioc
->pcidev
.device_id
!= BFA_PCI_DEVICE_ID_FC_8G1P
) {
2497 ioc_state
= readl(ioc
->ioc_regs
.alt_ioc_fwstate
);
2498 if (!bfa_ioc_state_disabled(ioc_state
))
/*
 * Reset IOC fwstate registers.
 */
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
#define BFA_MFG_NAME "Brocade"

bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
				 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);

bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);

bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
	chip_rev[4] = ioc->attr->asic_rev;

bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);

bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);

bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
	struct bfi_ioc_attr_s	*ioc_attr;

	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;
	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}
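/*
 * Return the factory-programmed MAC from the manufacturing block. On
 * older WWN/MAC card models the last byte is offset by the PCI function
 * number; newer models derive it via bfa_mfg_increment_wwn_mac().
 */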
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
/*
 * Send AEN notification
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;
	enum bfa_ioc_type_e ioc_type;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
		break;
	}

	/* Send the AEN notification */
	aen_entry->aen_data.ioc.ioc_type = ioc_type;
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_IOC, event);
}
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
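/*
 * Ask the firmware to sync its SMEM contents before a host-side dump.
 * The request is fire-and-forget: it only carries the class code and no
 * response is expected (see bfa_ioc_fwsync() below).
 */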
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
	if (ioc->attr->nwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
	if (ioc->attr->pwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}
/*
 * BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
		bfa_iocpf_timeout(ioc);
	else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}

static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}
/*
 * bfa timer function
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
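/*
 * Arm a one-shot timer. 'timeout' is in milliseconds and is counted
 * down in BFA_TIMER_FREQ steps by bfa_timer_beat(); the timer must not
 * already be on the queue.
 */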
/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}

/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
/*
 * ASIC block related
 */
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
	struct bfa_ablk_cfg_inst_s *cfg_inst;
	int i, j;
	u16	be16;
	u32	be32;

	for (i = 0; i < BFA_ABLK_MAX; i++) {
		cfg_inst = &cfg->inst[i];
		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
			be16 = cfg_inst->pf_cfg[j].pers;
			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_qpairs;
			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_vectors;
			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
			be32 = cfg_inst->pf_cfg[j].bw;
			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
		}
	}
}
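/*
 * Mailbox response handler for the ASIC block. Query responses carry
 * the configuration in the DMA buffer, which is copied out and
 * byte-swapped; all responses complete the pending request through the
 * saved callback.
 */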
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		if (rsp->status == BFA_STATUS_OK) {
			memcpy(ablk->cfg, ablk->dma_addr.kva,
				sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		break;

	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

	bfa_trc(ablk->ioc, event);

	switch (event) {
	case BFA_IOC_E_ENABLED:
		WARN_ON(ablk->busy != BFA_FALSE);
		break;

	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending requests */
		ablk->pcifn = NULL;
		if (ablk->busy) {
			if (ablk->cbfn)
				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
			ablk->cbfn = NULL;
			ablk->busy = BFA_FALSE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}
}

u32
bfa_ablk_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
	ablk->dma_addr.kva = dma_kva;
	ablk->dma_addr.pa = dma_pa;
}

void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
	ablk->ioc = ioc;

	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
	bfa_q_qe_init(&ablk->ioc_notify);
	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
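/*
 * All ASIC block requests below follow the same pattern: fail fast if
 * the IOC is not operational or another request is outstanding, record
 * the completion callback, build the BFI message in the ablk mailbox
 * command and queue it. The reply is handled in bfa_ablk_isr().
 */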
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_query_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cfg = ablk_cfg;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
		    bfa_ioc_portid(ablk->ioc));
	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		u8 port, enum bfi_pcifn_class personality, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->pcifn = pcifn;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
		    bfa_ioc_portid(ablk->ioc));
	m->pers = cpu_to_be16((u16)personality);
	m->bw = cpu_to_be32(bw);
	m->port = port;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->port = (u8)port;
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	m->bw = cpu_to_be32(bw);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
		    bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
		    bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 * SFP module specific
 */

/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
				enum bfa_port_speed portspeed);

static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->lock);
	if (sfp->cbfn)
		sfp->cbfn(sfp->cbarg, sfp->status);
	sfp->lock = 0;
	sfp->data_valid = 0;
}

static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->portspeed);
	if (sfp->media) {
		bfa_sfp_media_get(sfp);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->media = NULL;
	}

	if (sfp->portspeed) {
		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	}

	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
}
/*
 * IOC event handler.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
	struct bfa_sfp_s *sfp = sfp_arg;

	bfa_trc(sfp, event);
	bfa_trc(sfp, sfp->lock);
	bfa_trc(sfp, sfp->state_query_lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (sfp->lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_show(sfp);
		}

		if (sfp->state_query_lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_state_query(sfp);
		}
		break;

	default:
		break;
	}
}
/*
 * SFP's State Change Notification post to AEN
 */
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;
	enum bfa_port_aen_event aen_evt = 0;

	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
		      ((u64)rsp->event));

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		aen_evt = BFA_PORT_AEN_SFP_INSERT;
		break;
	case BFA_SFP_SCN_REMOVED:
		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
		break;
	case BFA_SFP_SCN_FAILED:
		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
		break;
	case BFA_SFP_SCN_POM:
		aen_evt = BFA_PORT_AEN_SFP_POM;
		aen_entry->aen_data.port.level = rsp->pomlvl;
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
				  BFA_AEN_CAT_PORT, aen_evt);
}
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	bfa_trc(sfp, req->memtype);

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
		    bfa_ioc_portid(sfp->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}

/*
 * SFP is valid, read sfp data
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	WARN_ON(sfp->lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->lock = 1;
	sfp->memtype = memtype;
	req->memtype = memtype;

	/* Setup SG list */
	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

	bfa_sfp_getdata_send(sfp);
}
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		sfp->state = BFA_SFP_STATE_INSERTED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_REMOVED:
		sfp->state = BFA_SFP_STATE_REMOVED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_FAILED:
		sfp->state = BFA_SFP_STATE_FAILED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		sfp->state = BFA_SFP_STATE_UNSUPPORT;
		bfa_sfp_scn_aen_post(sfp, rsp);
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	case BFA_SFP_SCN_POM:
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_VALID:
		sfp->state = BFA_SFP_STATE_VALID;
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}
}
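/*
 * Completion of an SFP show request: cache data validity and status,
 * and for DIAGEXT reads copy the EEPROM image out of the DMA buffer
 * before running the completion callbacks.
 */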
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

	if (!sfp->lock) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(sfp, sfp->lock);
		return;
	}

	bfa_trc(sfp, rsp->status);
	if (rsp->status == BFA_STATUS_OK) {
		sfp->data_valid = 1;
		if (sfp->state == BFA_SFP_STATE_VALID)
			sfp->status = BFA_STATUS_OK;
		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
			sfp->status = BFA_STATUS_SFP_UNSUPP;
		else
			bfa_trc(sfp, sfp->state);
	} else {
		sfp->data_valid = 0;
		sfp->status = rsp->status;
		/* sfpshow shouldn't change sfp state */
	}

	bfa_trc(sfp, sfp->memtype);
	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
		bfa_trc(sfp, sfp->data_valid);
		if (sfp->data_valid) {
			u32	size = sizeof(struct sfp_mem_s);
			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
			memcpy(des, sfp->dbuf_kva, size);
		}
		/*
		 * Queue completion callback.
		 */
		bfa_cb_sfp_show(sfp);
	} else
		sfp->lock = 0;

	bfa_trc(sfp, sfp->state_query_lock);
	if (sfp->state_query_lock) {
		sfp->state = rsp->state;
		/* Complete callback */
		bfa_cb_sfp_state_query(sfp);
	}
}
/*
 * SFP query fw sfp state
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* Should not be doing query if not in _INIT state */
	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
	WARN_ON(sfp->state_query_lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->state_query_lock = 1;
	req->memtype = 0;

	if (!sfp->lock)
		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
	enum bfa_defs_sfp_media_e *media = sfp->media;

	*media = BFA_SFP_MEDIA_UNKNOWN;

	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
		*media = BFA_SFP_MEDIA_UNSUPPORT;
	else if (sfp->state == BFA_SFP_STATE_VALID) {
		union sfp_xcvr_e10g_code_u e10g;
		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
				(sfpmem->srlid_base.xcvr[5] >> 1);

		e10g.b = sfpmem->srlid_base.xcvr[0];
		bfa_trc(sfp, e10g.b);
		bfa_trc(sfp, xmtr_tech);
		/* check fc transmitter tech */
		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
		    (xmtr_tech & SFP_XMTR_TECH_CA))
			*media = BFA_SFP_MEDIA_CU;
		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
			*media = BFA_SFP_MEDIA_EL;
		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
			 (xmtr_tech & SFP_XMTR_TECH_LC))
			*media = BFA_SFP_MEDIA_LW;
		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
			 (xmtr_tech & SFP_XMTR_TECH_SA))
			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
		else if (e10g.r.e10g_sr)
			*media = BFA_SFP_MEDIA_SW;
		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
			*media = BFA_SFP_MEDIA_LW;
		else if (e10g.r.e10g_unall)
			*media = BFA_SFP_MEDIA_UNKNOWN;
	} else
		bfa_trc(sfp, sfp->state);
}
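/*
 * Validate a user-requested port speed against the transceiver's FC3
 * speed bits and, for 10G, its Ethernet compliance codes.
 */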
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;

	if (portspeed == BFA_PORT_SPEED_10GBPS) {
		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
			return BFA_STATUS_OK;
		else {
			bfa_trc(sfp, e10g.b);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
		return BFA_STATUS_OK;
	else {
		bfa_trc(sfp, portspeed);
		bfa_trc(sfp, fc3.b);
		bfa_trc(sfp, e10g.b);
		return BFA_STATUS_UNSUPP_SPEED;
	}
}
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
	struct bfa_sfp_s *sfp = sfparg;

	switch (msg->mh.msg_id) {
	case BFI_SFP_I2H_SHOW:
		bfa_sfp_show_comp(sfp, msg);
		break;

	case BFI_SFP_I2H_SCN:
		bfa_sfp_scn(sfp, msg);
		break;

	default:
		bfa_trc(sfp, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * Return DMA memory needed by sfp module.
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
 * Attach virtual and physical memory for SFP.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}

/*
 * Claim Memory for SFP
 */
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
	sfp->dbuf_kva = dm_kva;
	sfp->dbuf_pa = dm_pa;
	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));

	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
 * Show SFP eeprom content
 *
 * @param[in] sfp    - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data
 *
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;
	sfp->sfpmem = sfpmem;

	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
/*
 * Return SFP Media type
 *
 * @param[in] sfp    - bfa sfp module
 *
 * @param[out] media - sfp media type
 *
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	bfa_sfp_media_get(sfp);
	return BFA_STATUS_OK;
}
/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp - bfa sfp module
 * @param[in] portspeed - port speed from user
 *
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;	/* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
/*
 * Flash module specific
 */

/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid write segment to cross sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)

static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
			int inst, int type)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
	aen_entry->aen_data.audit.partition_inst = inst;
	aen_entry->aen_data.audit.partition_type = type;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_AUDIT, event);
}
static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_flash_s	*flash = cbarg;

	bfa_trc(flash, event);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
/*
 * Send flash attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
			(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Send flash write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
	struct bfi_flash_write_req_s *msg =
			(struct bfi_flash_write_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);

	flash->residue -= len;
	flash->offset += len;
}
/*
 * Send flash read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_read_req_s *msg =
			(struct bfi_flash_read_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Send flash erase request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_erase_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_erase_req_s *msg =
			(struct bfi_flash_erase_req_s *) flash->mb.msg;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;

	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_flash_event_s *event;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;

			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
		break;
	case BFI_FLASH_I2H_EVENT:
		status = be32_to_cpu(m.event->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_BAD_FWCFG)
			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
		else if (status == BFA_STATUS_INVALID_VENDOR) {
			u32 param;
			param = be32_to_cpu(m.event->param);
			bfa_trc(flash, param);
			bfa_ioc_aen_post(flash->ioc,
				BFA_IOC_AEN_INVALID_VENDOR);
		}
		break;

	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need flash */
	if (mincfg)
		return 0;
	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * @param[in] flash - flash structure
 * @param[in] ioc  - ioc structure
 * @param[in] dev  - device structure
 * @param[in] trcmod - trace module
 * @param[in] logmod - log module
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);

	/* min driver doesn't need flash */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}
/*
 * Claim memory for flash
 *
 * @param[in] flash - flash structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
		bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);

	return BFA_STATUS_OK;
}
/*
 * Erase flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;

	bfa_flash_erase_send(flash);
	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
				instance, type);
	return BFA_STATUS_OK;
}
/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
/*
 * Read flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;
	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
/*
 * DIAG module specific
 */

#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
#define BFA_DIAG_FWPING_TOV	1000	/* msec */

/* IOC event handler */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;

	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
		}

		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
					   diag->tsensor.status);
			diag->tsensor.lock = 0;
		}

		if (diag->block) {
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}

			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;

	default:
		break;
	}
}
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			 sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
	diag->result = NULL;
}
/*
 * Perform DMA test directly
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32	i;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with pattern */
	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;

	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;

	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	/* Set up dma count */
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	/* Set up data pattern */
	fwping_req->data = diag->fwping.data;

	/* build host command */
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
		bfa_ioc_portid(diag->ioc));

	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
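/*
 * Firmware ping completion: the expected DMA pattern alternates between
 * the original data and its complement on every ping (see 'pat' below),
 * so both the echoed mailbox data and the full DMA buffer are verified.
 */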
static void
diag_fwping_comp(struct bfa_diag_s *diag,
		 struct bfi_diag_fwping_rsp_s *diag_rsp)
{
	u32	rsp_data = diag_rsp->data;
	u8	rsp_dma_status = diag_rsp->dma_status;

	bfa_trc(diag, rsp_data);
	bfa_trc(diag, rsp_dma_status);

	if (rsp_dma_status == BFA_STATUS_OK) {
		u32	i, pat;

		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
			diag->fwping.data;
		/* Check mbox data */
		if (diag->fwping.data != rsp_data) {
			bfa_trc(diag, rsp_data);
			diag->fwping.result->dmastatus =
					BFA_STATUS_DATACORRUPTED;
			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
			return;
		}
		/* Check dma pattern */
		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
				bfa_trc(diag, i);
				bfa_trc(diag, pat);
				bfa_trc(diag,
					*((u32 *)diag->fwping.dbuf_kva + i));
				diag->fwping.result->dmastatus =
						BFA_STATUS_DATACORRUPTED;
				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
				diag->fwping.cbfn(diag->fwping.cbarg,
						diag->fwping.status);
				diag->fwping.lock = 0;
				return;
			}
		}
		diag->fwping.result->dmastatus = BFA_STATUS_OK;
		diag->fwping.status = BFA_STATUS_OK;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	} else {
		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	}
}
/*
 * Temperature Sensor
 */

static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;

	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}

static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;

	if (rsp->ts_brd) {
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s  *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is needed
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}

static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
	bfa_trc(diag, diag->ledtest.lock);
	diag->ledtest.lock = BFA_FALSE;
	/* no bfa_cb_queue is needed because driver is not waiting */
}
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;

	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
		bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}

static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
	bfa_trc(diag, diag->beacon.state);
	diag->beacon.state = BFA_FALSE;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
/*
 * Diag hmbox handler
 */
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;

	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 * @param[in] *diag - diag data struct
 * @param[in] *memtest - mem test params input from upper layer,
 * @param[in] pattern - mem test pattern
 * @param[in] *result - mem test result
 * @param[in] cbfn - mem test callback function
 * @param[in] cbarg - callback function arg
 */
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
		u32 pattern, struct bfa_diag_memtest_result *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	bfa_trc(diag, pattern);

	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
		return BFA_STATUS_ADAPTER_ENABLED;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block) {
		bfa_trc(diag, diag->block);
		return BFA_STATUS_DEVBUSY;
	} else
		diag->block = 1;

	diag->result = result;
	diag->cbfn = cbfn;
	diag->cbarg = cbarg;

	/* download memtest code and take LPU0 out of reset */
	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);

	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
	diag->timer_active = 1;
	return BFA_STATUS_OK;
}
/*
 * DIAG firmware ping command
 *
 * @param[in] *diag - diag data struct
 * @param[in] cnt - dma loop count for testing PCIE
 * @param[in] data - data pattern to pass in fw
 * @param[in] *result - pt to bfa_diag_fwping_result_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;

	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;

	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
/*
 * Read Temperature Sensor
 *
 * @param[in] *diag - diag data struct
 * @param[in] *result - pt to bfa_diag_temp_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 */
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
		struct bfa_diag_results_tempsensor_s *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/* check to see if there is a destructive diag cmd running */
	if (diag->block || diag->tsensor.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->tsensor.lock);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Init diag mod params */
	diag->tsensor.lock = 1;
	diag->tsensor.temp = result;
	diag->tsensor.cbfn = cbfn;
	diag->tsensor.cbarg = cbarg;

	/* Send msg to fw */
	diag_tempsensor_send(diag);

	return BFA_STATUS_OK;
}
/*
 * @param[in] *diag - diag data struct
 * @param[in] *ledtest - pt to ledtest data structure
 */
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	bfa_trc(diag, ledtest->cmd);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (diag->beacon.state)
		return BFA_STATUS_BEACON_ON;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	/* Send msg to fw */
	diag->ledtest.lock = BFA_TRUE;
	diag_ledtest_send(diag, ledtest);

	return BFA_STATUS_OK;
}
/*
 * Port beaconing command
 *
 * @param[in] *diag - diag data struct
 * @param[in] beacon - port beaconing 1:ON 0:OFF
 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
 * @param[in] sec - beaconing duration in seconds
 */
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
		bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
	bfa_trc(diag, beacon);
	bfa_trc(diag, link_e2e_beacon);
	bfa_trc(diag, sec);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	if (diag->beacon.state && beacon)	/* beacon already on */
		return BFA_STATUS_BEACON_ON;

	diag->beacon.state	= beacon;
	diag->beacon.link_e2e	= link_e2e_beacon;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);

	/* Send msg to fw */
	diag_portbeacon_send(diag, beacon, sec);

	return BFA_STATUS_OK;
}
/*
 * Return DMA memory needed by diag module.
 */
u32
bfa_diag_meminfo(void)
{
	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Attach virtual and physical memory for Diag.
 */
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
	diag->dev = dev;
	diag->ioc = ioc;
	diag->trcmod = trcmod;

	diag->block = 0;
	diag->cbfn = NULL;
	diag->cbarg = NULL;
	diag->result = NULL;
	diag->cbfn_beacon = cbfn_beacon;

	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
	bfa_q_qe_init(&diag->ioc_notify);
	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
	diag->fwping.dbuf_kva = dm_kva;
	diag->fwping.dbuf_pa = dm_pa;
	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
/*
 *	PHY module specific
 */
#define BFA_PHY_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS	0x018878	/* phy semaphore status reg */

static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
	int i, m = sz >> 2;

	for (i = 0; i < m; i++)
		obuf[i] = be32_to_cpu(ibuf[i]);
}

static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_phy_s *phy = cbarg;

	bfa_trc(phy, event);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (phy->op_busy) {
			phy->status = BFA_STATUS_IOC_FAILURE;
			phy->cbfn(phy->cbarg, phy->status);
			phy->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
/*
 * Send phy attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_query_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_query_req_s *msg =
			(struct bfi_phy_query_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_write_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_write_req_s *msg =
			(struct bfi_phy_write_req_s *) phy->mb.msg;
	u32	len;
	u16	*buf, *dbuf;
	int	i, sz;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == phy->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);

	buf = (u16 *) (phy->ubuf + phy->offset);
	dbuf = (u16 *)phy->dbuf_kva;
	sz = len >> 1;
	for (i = 0; i < sz; i++)
		buf[i] = cpu_to_be16(dbuf[i]);

	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);

	phy->residue -= len;
	phy->offset += len;
}
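
/*
 * Phy reads and writes are carried out in BFA_PHY_DMA_BUF_SZ (8k) chunks:
 * residue/offset track how much of the user buffer remains, msg->last
 * marks the final write chunk, and bfa_phy_intr() issues the next
 * request on each response until residue reaches zero.
 */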
/*
 * Send phy read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_read_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_read_req_s *msg =
			(struct bfi_phy_read_req_s *) phy->mb.msg;
	u32	len;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy stats request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_stats_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_stats_req_s *msg =
			(struct bfi_phy_stats_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Phy memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need phy */
	if (mincfg)
		return 0;

	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * @param[in] phy - phy structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] logmod - log module
 */
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	phy->ioc = ioc;
	phy->trcmod = trcmod;
	phy->cbfn = NULL;
	phy->cbarg = NULL;
	phy->op_busy = 0;

	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
	bfa_q_qe_init(&phy->ioc_notify);
	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);

	/* min driver doesn't need phy */
	if (mincfg) {
		phy->dbuf_kva = NULL;
		phy->dbuf_pa = 0;
	}
}
/*
 * Claim memory for phy
 *
 * @param[in] phy - phy structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
		bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	phy->dbuf_kva = dm_kva;
	phy->dbuf_pa = dm_pa;
	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
static bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
	void __iomem	*rb;

	rb = bfa_ioc_bar0(ioc);
	return readl(rb + BFA_PHY_LOCK_STATUS);
}
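
/*
 * Phy operations are accepted only on BFA_MFG_TYPE_LIGHTNING cards
 * (bfa_phy_present()) and are serialized: a request is rejected with
 * BFA_STATUS_DEVBUSY while op_busy is set or while the hardware
 * semaphore at BFA_PHY_LOCK_STATUS reads non-zero (bfa_phy_busy()).
 */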
/*
 * Get phy attribute.
 *
 * @param[in] phy - phy structure
 * @param[in] attr - phy attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (uint8_t *) attr;
	bfa_phy_query_send(phy);

	return BFA_STATUS_OK;
}
/*
 * Get phy stats.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_stats_s *stats,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (u8 *) stats;
	bfa_phy_stats_send(phy);

	return BFA_STATUS_OK;
}
/*
 * Update phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;

	bfa_phy_write_send(phy);
	return BFA_STATUS_OK;
}
/*
 * Read phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;

	bfa_phy_read_send(phy);

	return BFA_STATUS_OK;
}
/*
 * Process phy response messages upon receiving interrupts.
 *
 * @param[in] phyarg - phy structure
 * @param[in] msg - message structure
 */
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_phy_s *phy = phyarg;
	u32	status;

	union {
		struct bfi_phy_query_rsp_s *query;
		struct bfi_phy_stats_rsp_s *stats;
		struct bfi_phy_write_rsp_s *write;
		struct bfi_phy_read_rsp_s *read;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(phy, msg->mh.msg_id);

	if (!phy->op_busy) {
		/* receiving response after ioc failure */
		bfa_trc(phy, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_PHY_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_attr_s *attr =
				(struct bfa_phy_attr_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_attr_s));
			bfa_trc(phy, attr->status);
			bfa_trc(phy, attr->length);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_STATS_RSP:
		status = be32_to_cpu(m.stats->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_stats_s *stats =
				(struct bfa_phy_stats_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_stats_s));
			bfa_trc(phy, stats->status);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK || phy->residue == 0) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			bfa_trc(phy, phy->offset);
			bfa_phy_write_send(phy);
		}
		break;
	case BFI_PHY_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
			u16 *dbuf = (u16 *)phy->dbuf_kva;
			int i, sz = len >> 1;

			bfa_trc(phy, phy->offset);
			bfa_trc(phy, len);

			for (i = 0; i < sz; i++)
				buf[i] = be16_to_cpu(dbuf[i]);

			phy->residue -= len;
			phy->offset += len;

			if (phy->residue == 0) {
				phy->status = status;
				phy->op_busy = 0;
				if (phy->cbfn)
					phy->cbfn(phy->cbarg, phy->status);
			} else
				bfa_phy_read_send(phy);
		}
		break;
	default:
		WARN_ON(1);
	}
}
/*
 *	DCONF module specific
 */

/*
 * DCONF state machine events
 */
enum bfa_dconf_event {
	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer */
	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
};

/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);

static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
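
/*
 * DCONF state machine overview (as implemented below): uninit starts a
 * flash read of BFA_FLASH_PART_DRV and moves to flash_read, which goes
 * to ready on completion.  A write request (BFA_DCONF_SM_WR) moves
 * ready to dirty and arms BFA_DCONF_UPDATE_TOV; on timeout the entries
 * are flushed to flash in sync, and on exit in final_sync.  An IOC
 * disable while dirty parks the module in iocdown_dirty until the
 * next init.
 */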
/*
 * Beginning state of dconf module. Waiting for an event to start.
 */
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		if (dconf->min_cfg) {
			bfa_trc(dconf->bfa, dconf->min_cfg);
			return;
		}
		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
		dconf->flashdone = BFA_FALSE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
					BFA_FLASH_PART_DRV, dconf->instance,
					dconf->dconf,
					sizeof(struct bfa_dconf_s), 0,
					bfa_dconf_init_cb, dconf->bfa);
		if (bfa_status != BFA_STATUS_OK) {
			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
			return;
		}
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_WR:
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Read flash for dconf entries and make a call back to the driver once done.
 */
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * DCONF Module is in ready state. Has completed the initialization.
 */
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	case BFA_DCONF_SM_INIT:
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * entries are dirty, write back to the flash.
 */
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Sync the dconf entries to the flash.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_ioc_disable(&dconf->bfa->ioc);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Compute and return memory needed by DRV_CFG module.
 */
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_hdr_s));
	else
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_s));
}
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
		/*
		 * Set the flashdone flag to TRUE explicitly as no flash
		 * write will happen in min_cfg mode.
		 */
		dconf->flashdone = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->flashdone = BFA_TRUE;
	bfa_trc(bfa, dconf->flashdone);
	bfa_iocfc_cb_dconf_modinit(bfa, status);
	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modinit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}

static void bfa_dconf_timer(void *cbarg)
{
	struct bfa_dconf_mod_s *dconf = cbarg;
	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, 0);

	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
				BFA_FLASH_PART_DRV, dconf->instance,
				dconf->dconf, sizeof(struct bfa_dconf_s), 0,
				bfa_dconf_cbfn, dconf);
	if (bfa_status != BFA_STATUS_OK)
		WARN_ON(bfa_status);
	bfa_trc(dconf->bfa, bfa_status);

	return bfa_status;
}
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_trc(dconf->bfa, 0);
	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
		return BFA_STATUS_FAILED;

	if (dconf->min_cfg) {
		bfa_trc(dconf->bfa, dconf->min_cfg);
		return BFA_STATUS_FAILED;
	}

	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
	return BFA_STATUS_OK;
}
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
	struct bfa_dconf_mod_s *dconf = arg;

	WARN_ON(status);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modexit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
	bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);