/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)			\
		(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
		readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_INITFAILED	= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_PFFAILED		= 9,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 10,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 11,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 12,	/*!< timeout			*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
};

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

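/*
 * Illustrative sketch (not part of the driver): the bfa_fsm_state_decl()
 * macros above generate one entry/handler pair per state, and the
 * bfa_sm_table arrays map those handlers back to the public state enums.
 * Assuming the usual bfa FSM helpers declared in the driver headers, a
 * transition is driven roughly like this:
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);	// runs reset_entry()
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);		// dispatched to the
 *							// current state handler
 *
 * The handler then calls bfa_fsm_set_state() again to move on, so every arc
 * in the IOC/IOCPF state machines below is a send_event()/set_state() pair.
 */
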
/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/**
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOC get attribute request is being sent, awaiting the firmware response.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_INITFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Notify enable completion callback
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_reset(iocpf->ioc, 0);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_enable(iocpf->ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_pf_failed(ioc);
		if (bfa_nw_ioc_is_operational(ioc))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			bfa_ioc_sync_leave(ioc);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_initfailed(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * BFA IOC private functions
 */

/**
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return true;

	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_UNINIT)
		return;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		return;

	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

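/*
 * Illustrative sketch (not part of the driver): the register-based
 * semaphore above behaves like a test-and-set bit. A polling caller that
 * does not go through the IOCPF timer can acquire and release it directly
 * with the exported helpers, e.g. around the init semaphore used below in
 * bfa_ioc_pll_init():
 *
 *	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		// ... touch shared chip resources ...
 *		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 *
 * bfa_ioc_hw_sem_get() is the asynchronous variant: a read of 0 means the
 * lock was taken and IOCPF_E_SEMLOCKED is posted; otherwise sem_timer
 * retries after BFA_IOC_HWSEM_TOV milliseconds.
 */
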
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (swab32(fwhdr.param) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	boot_env = BFI_BOOT_LOADER_OS;

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

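/*
 * Illustrative note (not from the original source): the heartbeat check is
 * a sampled counter rather than an interrupt. Firmware increments
 * ioc_regs.heartbeat; the driver glue is expected to call
 * bfa_nw_ioc_hb_check(ioc) from its hb_timer handler every BFA_IOC_HB_TOV
 * milliseconds, e.g. (wrapper name is hypothetical, the real wiring lives
 * in the bnad glue code):
 *
 *	static void my_hb_timeout(unsigned long data)
 *	{
 *		bfa_nw_ioc_hb_check((void *)data);
 *	}
 *
 * Only a reading identical to the cached ioc->hb_count across one period
 * triggers bfa_ioc_recover().
 */
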
/**
 * @brief
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_LOADER_OFF)));
}

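/*
 * Illustrative note (not from the original source): SMEM is reached through
 * a sliding window, so the download loop above splits every linear firmware
 * offset into a page number and a page offset. Assuming the usual PSS_SMEM_*
 * helpers from the register headers, the mapping is roughly:
 *
 *	pgnum = bfa_ioc_smem_pgnum(ioc, addr);	// window to select
 *	loff  = PSS_SMEM_PGOFF(addr);		// offset inside the window
 *
 * When loff wraps back to 0 the code bumps pgnum and reprograms
 * host_page_num_fn, which is why the wrap-around branch sits inside the
 * copy loop.
 */
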
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}

static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/**
 * IOC public
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 *  release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc  = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

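/*
 * Illustrative sketch (not part of the driver): the exported setup calls
 * above are meant to be used in this order by the bna/bnad code that owns
 * the IOC. The callback structure and DMA buffer names are placeholders for
 * whatever the caller allocates:
 *
 *	bfa_nw_ioc_attach(ioc, bfa, &my_ioc_cbfn);	// callbacks + FSM reset
 *	bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_MC_LL);	// map port, init regs
 *	bfa_nw_ioc_mem_claim(ioc, attr_kva, attr_pa);	// bfa_nw_ioc_meminfo()
 *							// bytes of DMA memory
 *	bfa_nw_ioc_enable(ioc);				// posts IOC_E_ENABLE
 *
 * bfa_nw_ioc_disable() and bfa_nw_ioc_detach() unwind the same sequence on
 * shutdown.
 */
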
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

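/*
 * Illustrative sketch (not part of the driver): a common module such as the
 * enet/cee code is expected to pair the two calls above -- register a
 * handler for its message class once, then queue commands as needed. The
 * handler and argument names are placeholders:
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, my_msg_handler, my_cbarg);
 *
 *	struct bfa_mbox_cmd cmd;
 *	// fill cmd.msg with a BFI request for the same message class
 *	bfa_nw_ioc_mbox_queue(ioc, &cmd);	// sent now, or when the
 *						// mailbox frees up
 *
 * Serialization is the caller's job; the queue only defers commands while
 * the hfn_mbox_cmd register reports the previous one still pending.
 */
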
2002/**
2003 * Handle mailbox interrupts
2004 */
2005void
8a891429 2006bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
8b230ed8
RM
2007{
2008 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2009 struct bfi_mbmsg m;
2010 int mc;
2011
2012 bfa_ioc_msgget(ioc, &m);
2013
2014 /**
2015 * Treat IOC message class as special.
2016 */
2017 mc = m.mh.msg_class;
2018 if (mc == BFI_MC_IOC) {
2019 bfa_ioc_isr(ioc, &m);
2020 return;
2021 }
2022
0746556b 2023 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
8b230ed8
RM
2024 return;
2025
2026 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2027}
2028
2029void
8a891429 2030bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
8b230ed8 2031{
9b08a4fc
RM
2032 bfa_ioc_stats(ioc, ioc_hbfails);
2033 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
8b230ed8
RM
2034 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2035}
2036
bd5a92e9
RM
2037/**
2038 * return true if IOC is disabled
2039 */
2040bool
2041bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2042{
2043 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2044 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2045}
2046
8b230ed8
RM
2047/**
2048 * Add to IOC heartbeat failure notification queue. To be used by common
2049 * modules such as cee, port, diag.
2050 */
2051void
bd5a92e9
RM
2052bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2053 struct bfa_ioc_notify *notify)
8b230ed8 2054{
bd5a92e9 2055 list_add_tail(&notify->qe, &ioc->notify_q);
8b230ed8
RM
2056}

#define BFA_MFG_NAME "Brocade"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
		return BFA_IOC_TYPE_LL;
	}
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
		(void *)ioc->attr->brcd_serialnum,
		BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
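
/*
 * Illustrative sketch, not part of the original driver: snapshotting the IOC
 * attributes, e.g. for an ethtool or debugfs query. The temporary allocation
 * (<linux/slab.h> assumed) and the printed fields are choices made for this
 * example only; bfa_nw_ioc_get_attr() itself just fills the caller-provided
 * structure.
 */
static void __maybe_unused
example_dump_ioc_attr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (attr == NULL)
		return;

	bfa_nw_ioc_get_attr(ioc, attr);
	pr_info("IOC state %d type %d port %d\n",
		attr->state, attr->ioc_type, attr->port_id);
	kfree(attr);
}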

/**
 * WWN public
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}

/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
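
/*
 * Illustrative sketch, not part of the original driver: both timeout entry
 * points above take a void pointer, so a driver-side timer typically wraps
 * them. The wrapper, the timer setup shown in the comment, the BFA_IOC_TOV
 * period and the omitted locking are assumptions for this example only
 * (older timer API taking an unsigned long argument).
 */
static void __maybe_unused
example_iocpf_timer(unsigned long data)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *)data;

	bfa_nw_iocpf_timeout(ioc);
}

/*
 * Armed, for example, from the driver's setup path:
 *
 *	setup_timer(&timer, example_iocpf_timer, (unsigned long)ioc);
 *	mod_timer(&timer, jiffies + msecs_to_jiffies(BFA_IOC_TOV));
 */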