[SCSI] bfa: Serialize the IOC hw semaphore unlock logic.
drivers/scsi/bfa/bfa_ioc.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)		\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)		\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc)	((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc)	((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
	((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
	 readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/* Acquiring address		*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

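/*
 * Forward the enable request to the IOCPF state machine.
 */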
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Acquiring address from fabric (entry function)
 */
static void
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * Acquiring address from the fabric
 */
static void
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

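/*
 * IOC is operational. Notify enable completion and modules registered
 * for IOC events.
 */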
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

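/*
 * Start h/w initialization; the h/w semaphore is already held.
 */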
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

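/*
 * Spin up to BFA_SEM_SPINCNT times trying to acquire the given h/w
 * semaphore register; returns BFA_TRUE once bit 0 reads clear (locked).
 */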
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

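/*
 * Boot, re-use or wait on firmware based on the current IOC f/w state.
 */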
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

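/*
 * Copy an h2i message into the host-to-LPU mailbox registers and ring
 * the mailbox doorbell.
 */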
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

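/*
 * Heartbeat timer callback. Initiates IOC recovery when the firmware
 * heartbeat counter stops advancing.
 */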
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

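/*
 * Latch the current heartbeat count and start heartbeat monitoring.
 */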
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
}


/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);
	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}

bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{

	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * Initialize LMEM
	 */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}

/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}



bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}

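/*
 * Fetch a pending firmware-to-host mailbox message, if one is posted.
 */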
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}

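/*
 * IOC mailbox ISR. Dispatches firmware responses to the IOC/IOCPF
 * state machines.
 */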
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
	INIT_LIST_HEAD(&ioc->notify_q);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case BFA_PCI_DEVICE_ID_FC_8G1P:
	case BFA_PCI_DEVICE_ID_FC_8G2P:
		ioc->asic_gen = BFI_ASIC_GEN_CB;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT_FC:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = BFA_TRUE;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		WARN_ON(1);
	}

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
		bfa_ioc_set_cb_hwif(ioc);
	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_ioc_set_ct2_hwif(ioc);
		bfa_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
2304 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2305 }
2306
2307 void
2308 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2309 {
2310 bfa_ioc_stats(ioc, ioc_enables);
2311 ioc->dbg_fwsave_once = BFA_TRUE;
2312
2313 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2314 }
2315
2316 void
2317 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2318 {
2319 bfa_ioc_stats(ioc, ioc_disables);
2320 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2321 }
2322
2323
2324 /*
2325 * Initialize memory for saving firmware trace. Driver must initialize
2326 * trace memory before calling bfa_ioc_enable().
2327 */
2328 void
2329 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2330 {
2331 ioc->dbg_fwsave = dbg_fwsave;
2332 ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2333 }
2334
2335 /*
2336 * Register mailbox message handler functions
2337 *
2338 * @param[in] ioc IOC instance
2339 * @param[in] mcfuncs message class handler functions
2340 */
2341 void
2342 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2343 {
2344 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2345 int mc;
2346
2347 for (mc = 0; mc < BFI_MC_MAX; mc++)
2348 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2349 }
2350
2351 /*
2352 * Register mailbox message handler function, to be called by common modules
2353 */
2354 void
2355 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2356 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2357 {
2358 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2359
2360 mod->mbhdlr[mc].cbfn = cbfn;
2361 mod->mbhdlr[mc].cbarg = cbarg;
2362 }
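
/*
 * A minimal registration sketch (illustrative only), mirroring how
 * bfa_ablk_attach() and bfa_sfp_attach() below hook up their message
 * classes; my_isr and my_module are hypothetical names.
 *
 *	static void my_isr(void *cbarg, struct bfi_mbmsg_s *msg)
 *	{
 *		struct my_module_s *mod = cbarg;
 *		... dispatch on msg->mh.msg_id ...
 *	}
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, my_isr, my_module);
 */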
2363
2364 /*
2365 * Queue a mailbox command request to firmware. If the mailbox is busy,
2366 * the command is queued and sent later from the poll timer. It is the
2367 * caller's responsibility to serialize.
2368 * @param[in] ioc IOC instance
2369 * @param[in] cmd Mailbox command
2370 */
2371 void
2372 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2373 {
2374 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2375 u32 stat;
2376
2377 /*
2378 * If a previous command is pending, queue new command
2379 */
2380 if (!list_empty(&mod->cmd_q)) {
2381 list_add_tail(&cmd->qe, &mod->cmd_q);
2382 return;
2383 }
2384
2385 /*
2386 * If mailbox is busy, queue command for poll timer
2387 */
2388 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2389 if (stat) {
2390 list_add_tail(&cmd->qe, &mod->cmd_q);
2391 return;
2392 }
2393
2394 /*
2395 * mailbox is free -- queue command to firmware
2396 */
2397 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2398 }
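
/*
 * A usage sketch (illustrative only) of building and queueing a command;
 * see bfa_ioc_send_fwsync() below for a real caller, from which the
 * BFI_IOC_H2I_DBG_SYNC opcode here is borrowed.
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *			bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */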
2399
2400 /*
2401 * Handle mailbox interrupts
2402 */
2403 void
2404 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2405 {
2406 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2407 struct bfi_mbmsg_s m;
2408 int mc;
2409
2410 if (bfa_ioc_msgget(ioc, &m)) {
2411 /*
2412 * Treat IOC message class as special.
2413 */
2414 mc = m.mh.msg_class;
2415 if (mc == BFI_MC_IOC) {
2416 bfa_ioc_isr(ioc, &m);
2417 return;
2418 }
2419
2420 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2421 return;
2422
2423 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2424 }
2425
2426 bfa_ioc_lpu_read_stat(ioc);
2427
2428 /*
2429 * Try to send pending mailbox commands
2430 */
2431 bfa_ioc_mbox_poll(ioc);
2432 }
2433
2434 void
2435 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2436 {
2437 bfa_ioc_stats(ioc, ioc_hbfails);
2438 ioc->stats.hb_count = ioc->hb_count;
2439 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2440 }
2441
2442 /*
2443 * return true if IOC is disabled
2444 */
2445 bfa_boolean_t
2446 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2447 {
2448 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2449 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2450 }
2451
2452 /*
2453 * Return TRUE if IOC is in acquiring address state
2454 */
2455 bfa_boolean_t
2456 bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2457 {
2458 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2459 }
2460
2461 /*
2462 * return true if IOC firmware is different.
2463 */
2464 bfa_boolean_t
2465 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2466 {
2467 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2468 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2469 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2470 }
2471
2472 #define bfa_ioc_state_disabled(__sm) \
2473 (((__sm) == BFI_IOC_UNINIT) || \
2474 ((__sm) == BFI_IOC_INITING) || \
2475 ((__sm) == BFI_IOC_HWINIT) || \
2476 ((__sm) == BFI_IOC_DISABLED) || \
2477 ((__sm) == BFI_IOC_FAIL) || \
2478 ((__sm) == BFI_IOC_CFG_DISABLED))
2479
2480 /*
2481 * Check if adapter is disabled -- both IOCs should be in a disabled
2482 * state.
2483 */
2484 bfa_boolean_t
2485 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2486 {
2487 u32 ioc_state;
2488
2489 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2490 return BFA_FALSE;
2491
2492 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2493 if (!bfa_ioc_state_disabled(ioc_state))
2494 return BFA_FALSE;
2495
2496 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2497 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2498 if (!bfa_ioc_state_disabled(ioc_state))
2499 return BFA_FALSE;
2500 }
2501
2502 return BFA_TRUE;
2503 }
2504
2505 /*
2506 * Reset IOC fwstate registers.
2507 */
2508 void
2509 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2510 {
2511 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2512 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2513 }
2514
2515 #define BFA_MFG_NAME "Brocade"
2516 void
2517 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2518 struct bfa_adapter_attr_s *ad_attr)
2519 {
2520 struct bfi_ioc_attr_s *ioc_attr;
2521
2522 ioc_attr = ioc->attr;
2523
2524 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2525 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2526 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2527 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2528 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2529 sizeof(struct bfa_mfg_vpd_s));
2530
2531 ad_attr->nports = bfa_ioc_get_nports(ioc);
2532 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2533
2534 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2535 /* For now, model descr uses same model string */
2536 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2537
2538 ad_attr->card_type = ioc_attr->card_type;
2539 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2540
2541 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2542 ad_attr->prototype = 1;
2543 else
2544 ad_attr->prototype = 0;
2545
2546 ad_attr->pwwn = ioc->attr->pwwn;
2547 ad_attr->mac = bfa_ioc_get_mac(ioc);
2548
2549 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2550 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2551 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2552 ad_attr->asic_rev = ioc_attr->asic_rev;
2553
2554 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2555
2556 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2557 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2558 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2559 }
2560
2561 enum bfa_ioc_type_e
2562 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2563 {
2564 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2565 return BFA_IOC_TYPE_LL;
2566
2567 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2568
2569 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2570 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2571 }
2572
2573 void
2574 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2575 {
2576 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2577 memcpy((void *)serial_num,
2578 (void *)ioc->attr->brcd_serialnum,
2579 BFA_ADAPTER_SERIAL_NUM_LEN);
2580 }
2581
2582 void
2583 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2584 {
2585 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2586 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2587 }
2588
2589 void
2590 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2591 {
2592 WARN_ON(!chip_rev);
2593
2594 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2595
2596 chip_rev[0] = 'R';
2597 chip_rev[1] = 'e';
2598 chip_rev[2] = 'v';
2599 chip_rev[3] = '-';
2600 chip_rev[4] = ioc->attr->asic_rev;
2601 chip_rev[5] = '\0';
2602 }
2603
2604 void
2605 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2606 {
2607 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2608 memcpy(optrom_ver, ioc->attr->optrom_version,
2609 BFA_VERSION_LEN);
2610 }
2611
2612 void
2613 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2614 {
2615 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2616 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2617 }
2618
2619 void
2620 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2621 {
2622 struct bfi_ioc_attr_s *ioc_attr;
2623
2624 WARN_ON(!model);
2625 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2626
2627 ioc_attr = ioc->attr;
2628
2629 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2630 BFA_MFG_NAME, ioc_attr->card_type);
2631 }
2632
2633 enum bfa_ioc_state
2634 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2635 {
2636 enum bfa_iocpf_state iocpf_st;
2637 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2638
2639 if (ioc_st == BFA_IOC_ENABLING ||
2640 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2641
2642 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2643
2644 switch (iocpf_st) {
2645 case BFA_IOCPF_SEMWAIT:
2646 ioc_st = BFA_IOC_SEMWAIT;
2647 break;
2648
2649 case BFA_IOCPF_HWINIT:
2650 ioc_st = BFA_IOC_HWINIT;
2651 break;
2652
2653 case BFA_IOCPF_FWMISMATCH:
2654 ioc_st = BFA_IOC_FWMISMATCH;
2655 break;
2656
2657 case BFA_IOCPF_FAIL:
2658 ioc_st = BFA_IOC_FAIL;
2659 break;
2660
2661 case BFA_IOCPF_INITFAIL:
2662 ioc_st = BFA_IOC_INITFAIL;
2663 break;
2664
2665 default:
2666 break;
2667 }
2668 }
2669
2670 return ioc_st;
2671 }
2672
2673 void
2674 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2675 {
2676 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2677
2678 ioc_attr->state = bfa_ioc_get_state(ioc);
2679 ioc_attr->port_id = ioc->port_id;
2680 ioc_attr->port_mode = ioc->port_mode;
2681 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2682 ioc_attr->cap_bm = ioc->ad_cap_bm;
2683
2684 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2685
2686 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2687
2688 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2689 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2690 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2691 }
2692
2693 mac_t
2694 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2695 {
2696 /*
2697 * Check the IOC type and return the appropriate MAC
2698 */
2699 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2700 return ioc->attr->fcoe_mac;
2701 else
2702 return ioc->attr->mac;
2703 }
2704
2705 mac_t
2706 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2707 {
2708 mac_t m;
2709
2710 m = ioc->attr->mfg_mac;
2711 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2712 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2713 else
2714 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2715 bfa_ioc_pcifn(ioc));
2716
2717 return m;
2718 }
2719
2720 /*
2721 * Send AEN notification
2722 */
2723 void
2724 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2725 {
2726 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2727 struct bfa_aen_entry_s *aen_entry;
2728 enum bfa_ioc_type_e ioc_type;
2729
2730 bfad_get_aen_entry(bfad, aen_entry);
2731 if (!aen_entry)
2732 return;
2733
2734 ioc_type = bfa_ioc_get_type(ioc);
2735 switch (ioc_type) {
2736 case BFA_IOC_TYPE_FC:
2737 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2738 break;
2739 case BFA_IOC_TYPE_FCoE:
2740 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2741 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2742 break;
2743 case BFA_IOC_TYPE_LL:
2744 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2745 break;
2746 default:
2747 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2748 break;
2749 }
2750
2751 /* Send the AEN notification */
2752 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2753 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2754 BFA_AEN_CAT_IOC, event);
2755 }
2756
2757 /*
2758 * Retrieve saved firmware trace from a prior IOC failure.
2759 */
2760 bfa_status_t
2761 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2762 {
2763 int tlen;
2764
2765 if (ioc->dbg_fwsave_len == 0)
2766 return BFA_STATUS_ENOFSAVE;
2767
2768 tlen = *trclen;
2769 if (tlen > ioc->dbg_fwsave_len)
2770 tlen = ioc->dbg_fwsave_len;
2771
2772 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2773 *trclen = tlen;
2774 return BFA_STATUS_OK;
2775 }
2776
2777
2778 /*
2779 * Retrieve the current firmware trace from IOC smem.
2780 */
2781 bfa_status_t
2782 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2783 {
2784 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2785 int tlen;
2786 bfa_status_t status;
2787
2788 bfa_trc(ioc, *trclen);
2789
2790 tlen = *trclen;
2791 if (tlen > BFA_DBG_FWTRC_LEN)
2792 tlen = BFA_DBG_FWTRC_LEN;
2793
2794 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2795 *trclen = tlen;
2796 return status;
2797 }
2798
2799 static void
2800 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2801 {
2802 struct bfa_mbox_cmd_s cmd;
2803 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2804
2805 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2806 bfa_ioc_portid(ioc));
2807 req->clscode = cpu_to_be16(ioc->clscode);
2808 bfa_ioc_mbox_queue(ioc, &cmd);
2809 }
2810
2811 static void
2812 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2813 {
2814 u32 fwsync_iter = 1000;
2815
2816 bfa_ioc_send_fwsync(ioc);
2817
2818 /*
2819 * After sending the fw sync mbox command, wait for it to
2820 * take effect. We do not wait for a response because:
2821 * 1. The fw_sync mbox command has no response.
2822 * 2. Even if it did, interrupts might not be enabled when
2823 * this function is called.
2824 * So just keep checking whether any mbox command is pending and,
2825 * after a reasonable number of iterations, go ahead anyway;
2826 * the firmware may have crashed and the mbox command may never
2827 * be acknowledged.
2828 */
2829 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2830 fwsync_iter--;
2831 }
2832
2833 /*
2834 * Dump firmware smem
2835 */
2836 bfa_status_t
2837 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2838 u32 *offset, int *buflen)
2839 {
2840 u32 loff;
2841 int dlen;
2842 bfa_status_t status;
2843 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2844
2845 if (*offset >= smem_len) {
2846 *offset = *buflen = 0;
2847 return BFA_STATUS_EINVAL;
2848 }
2849
2850 loff = *offset;
2851 dlen = *buflen;
2852
2853 /*
2854 * On the first smem read, sync smem before proceeding;
2855 * there is no need to sync before reading every chunk.
2856 */
2857 if (loff == 0)
2858 bfa_ioc_fwsync(ioc);
2859
2860 if ((loff + dlen) >= smem_len)
2861 dlen = smem_len - loff;
2862
2863 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2864
2865 if (status != BFA_STATUS_OK) {
2866 *offset = *buflen = 0;
2867 return status;
2868 }
2869
2870 *offset += dlen;
2871
2872 if (*offset >= smem_len)
2873 *offset = 0;
2874
2875 *buflen = dlen;
2876
2877 return status;
2878 }
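
/*
 * A chunked-dump sketch (illustrative only): *offset and *buflen are
 * in/out, so a caller walks the whole smem by reissuing the call until
 * the offset wraps back to zero. The buffer size and consume() helper
 * below are assumptions, not part of the API.
 *
 *	u8 buf[256];
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(buf);
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		consume(buf, len);
 *	} while (off != 0);
 */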
2879
2880 /*
2881 * Firmware statistics
2882 */
2883 bfa_status_t
2884 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2885 {
2886 u32 loff = BFI_IOC_FWSTATS_OFF +
2887 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2888 int tlen;
2889 bfa_status_t status;
2890
2891 if (ioc->stats_busy) {
2892 bfa_trc(ioc, ioc->stats_busy);
2893 return BFA_STATUS_DEVBUSY;
2894 }
2895 ioc->stats_busy = BFA_TRUE;
2896
2897 tlen = sizeof(struct bfa_fw_stats_s);
2898 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2899
2900 ioc->stats_busy = BFA_FALSE;
2901 return status;
2902 }
2903
2904 bfa_status_t
2905 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2906 {
2907 u32 loff = BFI_IOC_FWSTATS_OFF +
2908 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2909 int tlen;
2910 bfa_status_t status;
2911
2912 if (ioc->stats_busy) {
2913 bfa_trc(ioc, ioc->stats_busy);
2914 return BFA_STATUS_DEVBUSY;
2915 }
2916 ioc->stats_busy = BFA_TRUE;
2917
2918 tlen = sizeof(struct bfa_fw_stats_s);
2919 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2920
2921 ioc->stats_busy = BFA_FALSE;
2922 return status;
2923 }
2924
2925 /*
2926 * Save firmware trace if configured.
2927 */
2928 static void
2929 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2930 {
2931 int tlen;
2932
2933 if (ioc->dbg_fwsave_once) {
2934 ioc->dbg_fwsave_once = BFA_FALSE;
2935 if (ioc->dbg_fwsave_len) {
2936 tlen = ioc->dbg_fwsave_len;
2937 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2938 }
2939 }
2940 }
2941
2942 /*
2943 * Firmware failure detected. Start recovery actions.
2944 */
2945 static void
2946 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2947 {
2948 bfa_ioc_stats(ioc, ioc_hbfails);
2949 ioc->stats.hb_count = ioc->hb_count;
2950 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2951 }
2952
2953 static void
2954 bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2955 {
2956 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2957 return;
2958 if (ioc->attr->nwwn == 0)
2959 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2960 if (ioc->attr->pwwn == 0)
2961 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2962 }
2963
2964 /*
2965 * BFA IOC PF private functions
2966 */
2967 static void
2968 bfa_iocpf_timeout(void *ioc_arg)
2969 {
2970 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2971
2972 bfa_trc(ioc, 0);
2973 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2974 }
2975
2976 static void
2977 bfa_iocpf_sem_timeout(void *ioc_arg)
2978 {
2979 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2980
2981 bfa_ioc_hw_sem_get(ioc);
2982 }
2983
2984 static void
2985 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2986 {
2987 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2988
2989 bfa_trc(ioc, fwstate);
2990
2991 if (fwstate == BFI_IOC_DISABLED) {
2992 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2993 return;
2994 }
2995
2996 if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2997 bfa_iocpf_timeout(ioc);
2998 else {
2999 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3000 bfa_iocpf_poll_timer_start(ioc);
3001 }
3002 }
3003
3004 static void
3005 bfa_iocpf_poll_timeout(void *ioc_arg)
3006 {
3007 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3008
3009 bfa_ioc_poll_fwinit(ioc);
3010 }
3011
3012 /*
3013 * bfa timer function
3014 */
3015 void
3016 bfa_timer_beat(struct bfa_timer_mod_s *mod)
3017 {
3018 struct list_head *qh = &mod->timer_q;
3019 struct list_head *qe, *qe_next;
3020 struct bfa_timer_s *elem;
3021 struct list_head timedout_q;
3022
3023 INIT_LIST_HEAD(&timedout_q);
3024
3025 qe = bfa_q_next(qh);
3026
3027 while (qe != qh) {
3028 qe_next = bfa_q_next(qe);
3029
3030 elem = (struct bfa_timer_s *) qe;
3031 if (elem->timeout <= BFA_TIMER_FREQ) {
3032 elem->timeout = 0;
3033 list_del(&elem->qe);
3034 list_add_tail(&elem->qe, &timedout_q);
3035 } else {
3036 elem->timeout -= BFA_TIMER_FREQ;
3037 }
3038
3039 qe = qe_next; /* go to next elem */
3040 }
3041
3042 /*
3043 * Pop all the timeout entries
3044 */
3045 while (!list_empty(&timedout_q)) {
3046 bfa_q_deq(&timedout_q, &elem);
3047 elem->timercb(elem->arg);
3048 }
3049 }
3050
3051 /*
3052 * Should be called with lock protection
3053 */
3054 void
3055 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3056 void (*timercb) (void *), void *arg, unsigned int timeout)
3057 {
3058
3059 WARN_ON(timercb == NULL);
3060 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3061
3062 timer->timeout = timeout;
3063 timer->timercb = timercb;
3064 timer->arg = arg;
3065
3066 list_add_tail(&timer->qe, &mod->timer_q);
3067 }
3068
3069 /*
3070 * Should be called with lock protection
3071 */
3072 void
3073 bfa_timer_stop(struct bfa_timer_s *timer)
3074 {
3075 WARN_ON(list_empty(&timer->qe));
3076
3077 list_del(&timer->qe);
3078 }
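
/*
 * A timer usage sketch (illustrative only): arm a timeout and let the
 * driver's periodic bfa_timer_beat() call, made every BFA_TIMER_FREQ
 * msecs, decay it to zero and fire the callback. my_timeout_cb and
 * my_arg are hypothetical; both calls assume the caller holds the
 * appropriate lock.
 *
 *	bfa_timer_begin(timer_mod, &my_timer, my_timeout_cb, my_arg, 2000);
 *	...
 *	bfa_timer_stop(&my_timer);	(only if cancelling early)
 */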
3079
3080 /*
3081 * ASIC block related
3082 */
3083 static void
3084 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3085 {
3086 struct bfa_ablk_cfg_inst_s *cfg_inst;
3087 int i, j;
3088 u16 be16;
3089 u32 be32;
3090
3091 for (i = 0; i < BFA_ABLK_MAX; i++) {
3092 cfg_inst = &cfg->inst[i];
3093 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3094 be16 = cfg_inst->pf_cfg[j].pers;
3095 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3096 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3097 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3098 be16 = cfg_inst->pf_cfg[j].num_vectors;
3099 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3100 be32 = cfg_inst->pf_cfg[j].bw;
3101 cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3102 }
3103 }
3104 }
3105
3106 static void
3107 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3108 {
3109 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3110 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3111 bfa_ablk_cbfn_t cbfn;
3112
3113 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3114 bfa_trc(ablk->ioc, msg->mh.msg_id);
3115
3116 switch (msg->mh.msg_id) {
3117 case BFI_ABLK_I2H_QUERY:
3118 if (rsp->status == BFA_STATUS_OK) {
3119 memcpy(ablk->cfg, ablk->dma_addr.kva,
3120 sizeof(struct bfa_ablk_cfg_s));
3121 bfa_ablk_config_swap(ablk->cfg);
3122 ablk->cfg = NULL;
3123 }
3124 break;
3125
3126 case BFI_ABLK_I2H_ADPT_CONFIG:
3127 case BFI_ABLK_I2H_PORT_CONFIG:
3128 /* update config port mode */
3129 ablk->ioc->port_mode_cfg = rsp->port_mode;
3130 /* fall through */
3131 case BFI_ABLK_I2H_PF_DELETE:
3132 case BFI_ABLK_I2H_PF_UPDATE:
3133 case BFI_ABLK_I2H_OPTROM_ENABLE:
3134 case BFI_ABLK_I2H_OPTROM_DISABLE:
3135 /* No-op */
3136 break;
3137
3138 case BFI_ABLK_I2H_PF_CREATE:
3139 *(ablk->pcifn) = rsp->pcifn;
3140 ablk->pcifn = NULL;
3141 break;
3142
3143 default:
3144 WARN_ON(1);
3145 }
3146
3147 ablk->busy = BFA_FALSE;
3148 if (ablk->cbfn) {
3149 cbfn = ablk->cbfn;
3150 ablk->cbfn = NULL;
3151 cbfn(ablk->cbarg, rsp->status);
3152 }
3153 }
3154
3155 static void
3156 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3157 {
3158 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3159
3160 bfa_trc(ablk->ioc, event);
3161
3162 switch (event) {
3163 case BFA_IOC_E_ENABLED:
3164 WARN_ON(ablk->busy != BFA_FALSE);
3165 break;
3166
3167 case BFA_IOC_E_DISABLED:
3168 case BFA_IOC_E_FAILED:
3169 /* Fail any pending requests */
3170 ablk->pcifn = NULL;
3171 if (ablk->busy) {
3172 if (ablk->cbfn)
3173 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3174 ablk->cbfn = NULL;
3175 ablk->busy = BFA_FALSE;
3176 }
3177 break;
3178
3179 default:
3180 WARN_ON(1);
3181 break;
3182 }
3183 }
3184
3185 u32
3186 bfa_ablk_meminfo(void)
3187 {
3188 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3189 }
3190
3191 void
3192 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3193 {
3194 ablk->dma_addr.kva = dma_kva;
3195 ablk->dma_addr.pa = dma_pa;
3196 }
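
/*
 * A minimal setup sketch (illustrative only): the driver sizes the DMA
 * region with bfa_ablk_meminfo() and hands back the carved-out kva/pa
 * pair. The dma_kva/dma_pa values come from the driver's DMA allocator
 * and are assumptions here.
 *
 *	u32 sz = bfa_ablk_meminfo();
 *	... allocate sz bytes of DMA-able memory as dma_kva/dma_pa ...
 *	bfa_ablk_memclaim(ablk, dma_kva, dma_pa);
 */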
3197
3198 void
3199 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3200 {
3201 ablk->ioc = ioc;
3202
3203 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3204 bfa_q_qe_init(&ablk->ioc_notify);
3205 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3206 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3207 }
3208
3209 bfa_status_t
3210 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3211 bfa_ablk_cbfn_t cbfn, void *cbarg)
3212 {
3213 struct bfi_ablk_h2i_query_s *m;
3214
3215 WARN_ON(!ablk_cfg);
3216
3217 if (!bfa_ioc_is_operational(ablk->ioc)) {
3218 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3219 return BFA_STATUS_IOC_FAILURE;
3220 }
3221
3222 if (ablk->busy) {
3223 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3224 return BFA_STATUS_DEVBUSY;
3225 }
3226
3227 ablk->cfg = ablk_cfg;
3228 ablk->cbfn = cbfn;
3229 ablk->cbarg = cbarg;
3230 ablk->busy = BFA_TRUE;
3231
3232 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3233 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3234 bfa_ioc_portid(ablk->ioc));
3235 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3236 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3237
3238 return BFA_STATUS_OK;
3239 }
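
/*
 * A caller sketch (illustrative only): the query is asynchronous and
 * completes through the callback with the firmware status. my_cfg and
 * my_query_done are hypothetical; the callback signature follows
 * bfa_ablk_cbfn_t.
 *
 *	static void my_query_done(void *cbarg, enum bfa_status status)
 *	{
 *		struct bfa_ablk_cfg_s *cfg = cbarg;
 *		... cfg is filled in when status == BFA_STATUS_OK ...
 *	}
 *
 *	if (bfa_ablk_query(ablk, &my_cfg, my_query_done, &my_cfg) !=
 *	    BFA_STATUS_OK)
 *		... IOC not operational, or a request is in flight ...
 */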
3240
3241 bfa_status_t
3242 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3243 u8 port, enum bfi_pcifn_class personality, int bw,
3244 bfa_ablk_cbfn_t cbfn, void *cbarg)
3245 {
3246 struct bfi_ablk_h2i_pf_req_s *m;
3247
3248 if (!bfa_ioc_is_operational(ablk->ioc)) {
3249 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3250 return BFA_STATUS_IOC_FAILURE;
3251 }
3252
3253 if (ablk->busy) {
3254 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3255 return BFA_STATUS_DEVBUSY;
3256 }
3257
3258 ablk->pcifn = pcifn;
3259 ablk->cbfn = cbfn;
3260 ablk->cbarg = cbarg;
3261 ablk->busy = BFA_TRUE;
3262
3263 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3264 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3265 bfa_ioc_portid(ablk->ioc));
3266 m->pers = cpu_to_be16((u16)personality);
3267 m->bw = cpu_to_be32(bw);
3268 m->port = port;
3269 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3270
3271 return BFA_STATUS_OK;
3272 }
3273
3274 bfa_status_t
3275 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3276 bfa_ablk_cbfn_t cbfn, void *cbarg)
3277 {
3278 struct bfi_ablk_h2i_pf_req_s *m;
3279
3280 if (!bfa_ioc_is_operational(ablk->ioc)) {
3281 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3282 return BFA_STATUS_IOC_FAILURE;
3283 }
3284
3285 if (ablk->busy) {
3286 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3287 return BFA_STATUS_DEVBUSY;
3288 }
3289
3290 ablk->cbfn = cbfn;
3291 ablk->cbarg = cbarg;
3292 ablk->busy = BFA_TRUE;
3293
3294 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3295 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3296 bfa_ioc_portid(ablk->ioc));
3297 m->pcifn = (u8)pcifn;
3298 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3299
3300 return BFA_STATUS_OK;
3301 }
3302
3303 bfa_status_t
3304 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3305 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3306 {
3307 struct bfi_ablk_h2i_cfg_req_s *m;
3308
3309 if (!bfa_ioc_is_operational(ablk->ioc)) {
3310 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3311 return BFA_STATUS_IOC_FAILURE;
3312 }
3313
3314 if (ablk->busy) {
3315 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3316 return BFA_STATUS_DEVBUSY;
3317 }
3318
3319 ablk->cbfn = cbfn;
3320 ablk->cbarg = cbarg;
3321 ablk->busy = BFA_TRUE;
3322
3323 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3324 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3325 bfa_ioc_portid(ablk->ioc));
3326 m->mode = (u8)mode;
3327 m->max_pf = (u8)max_pf;
3328 m->max_vf = (u8)max_vf;
3329 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3330
3331 return BFA_STATUS_OK;
3332 }
3333
3334 bfa_status_t
3335 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3336 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3337 {
3338 struct bfi_ablk_h2i_cfg_req_s *m;
3339
3340 if (!bfa_ioc_is_operational(ablk->ioc)) {
3341 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3342 return BFA_STATUS_IOC_FAILURE;
3343 }
3344
3345 if (ablk->busy) {
3346 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3347 return BFA_STATUS_DEVBUSY;
3348 }
3349
3350 ablk->cbfn = cbfn;
3351 ablk->cbarg = cbarg;
3352 ablk->busy = BFA_TRUE;
3353
3354 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3355 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3356 bfa_ioc_portid(ablk->ioc));
3357 m->port = (u8)port;
3358 m->mode = (u8)mode;
3359 m->max_pf = (u8)max_pf;
3360 m->max_vf = (u8)max_vf;
3361 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3362
3363 return BFA_STATUS_OK;
3364 }
3365
3366 bfa_status_t
3367 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3368 bfa_ablk_cbfn_t cbfn, void *cbarg)
3369 {
3370 struct bfi_ablk_h2i_pf_req_s *m;
3371
3372 if (!bfa_ioc_is_operational(ablk->ioc)) {
3373 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3374 return BFA_STATUS_IOC_FAILURE;
3375 }
3376
3377 if (ablk->busy) {
3378 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3379 return BFA_STATUS_DEVBUSY;
3380 }
3381
3382 ablk->cbfn = cbfn;
3383 ablk->cbarg = cbarg;
3384 ablk->busy = BFA_TRUE;
3385
3386 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3387 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3388 bfa_ioc_portid(ablk->ioc));
3389 m->pcifn = (u8)pcifn;
3390 m->bw = cpu_to_be32(bw);
3391 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3392
3393 return BFA_STATUS_OK;
3394 }
3395
3396 bfa_status_t
3397 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3398 {
3399 struct bfi_ablk_h2i_optrom_s *m;
3400
3401 if (!bfa_ioc_is_operational(ablk->ioc)) {
3402 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3403 return BFA_STATUS_IOC_FAILURE;
3404 }
3405
3406 if (ablk->busy) {
3407 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3408 return BFA_STATUS_DEVBUSY;
3409 }
3410
3411 ablk->cbfn = cbfn;
3412 ablk->cbarg = cbarg;
3413 ablk->busy = BFA_TRUE;
3414
3415 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3416 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3417 bfa_ioc_portid(ablk->ioc));
3418 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3419
3420 return BFA_STATUS_OK;
3421 }
3422
3423 bfa_status_t
3424 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3425 {
3426 struct bfi_ablk_h2i_optrom_s *m;
3427
3428 if (!bfa_ioc_is_operational(ablk->ioc)) {
3429 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3430 return BFA_STATUS_IOC_FAILURE;
3431 }
3432
3433 if (ablk->busy) {
3434 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3435 return BFA_STATUS_DEVBUSY;
3436 }
3437
3438 ablk->cbfn = cbfn;
3439 ablk->cbarg = cbarg;
3440 ablk->busy = BFA_TRUE;
3441
3442 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3443 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3444 bfa_ioc_portid(ablk->ioc));
3445 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3446
3447 return BFA_STATUS_OK;
3448 }
3449
3450 /*
3451 * SFP module specific
3452 */
3453
3454 /* forward declarations */
3455 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3456 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3457 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3458 enum bfa_port_speed portspeed);
3459
3460 static void
3461 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3462 {
3463 bfa_trc(sfp, sfp->lock);
3464 if (sfp->cbfn)
3465 sfp->cbfn(sfp->cbarg, sfp->status);
3466 sfp->lock = 0;
3467 sfp->cbfn = NULL;
3468 }
3469
3470 static void
3471 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3472 {
3473 bfa_trc(sfp, sfp->portspeed);
3474 if (sfp->media) {
3475 bfa_sfp_media_get(sfp);
3476 if (sfp->state_query_cbfn)
3477 sfp->state_query_cbfn(sfp->state_query_cbarg,
3478 sfp->status);
3479 sfp->media = NULL;
3480 }
3481
3482 if (sfp->portspeed) {
3483 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3484 if (sfp->state_query_cbfn)
3485 sfp->state_query_cbfn(sfp->state_query_cbarg,
3486 sfp->status);
3487 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3488 }
3489
3490 sfp->state_query_lock = 0;
3491 sfp->state_query_cbfn = NULL;
3492 }
3493
3494 /*
3495 * IOC event handler.
3496 */
3497 static void
3498 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3499 {
3500 struct bfa_sfp_s *sfp = sfp_arg;
3501
3502 bfa_trc(sfp, event);
3503 bfa_trc(sfp, sfp->lock);
3504 bfa_trc(sfp, sfp->state_query_lock);
3505
3506 switch (event) {
3507 case BFA_IOC_E_DISABLED:
3508 case BFA_IOC_E_FAILED:
3509 if (sfp->lock) {
3510 sfp->status = BFA_STATUS_IOC_FAILURE;
3511 bfa_cb_sfp_show(sfp);
3512 }
3513
3514 if (sfp->state_query_lock) {
3515 sfp->status = BFA_STATUS_IOC_FAILURE;
3516 bfa_cb_sfp_state_query(sfp);
3517 }
3518 break;
3519
3520 default:
3521 break;
3522 }
3523 }
3524
3525 /*
3526 * SFP's State Change Notification post to AEN
3527 */
3528 static void
3529 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3530 {
3531 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3532 struct bfa_aen_entry_s *aen_entry;
3533 enum bfa_port_aen_event aen_evt = 0;
3534
3535 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3536 ((u64)rsp->event));
3537
3538 bfad_get_aen_entry(bfad, aen_entry);
3539 if (!aen_entry)
3540 return;
3541
3542 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3543 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3544 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3545
3546 switch (rsp->event) {
3547 case BFA_SFP_SCN_INSERTED:
3548 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3549 break;
3550 case BFA_SFP_SCN_REMOVED:
3551 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3552 break;
3553 case BFA_SFP_SCN_FAILED:
3554 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3555 break;
3556 case BFA_SFP_SCN_UNSUPPORT:
3557 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3558 break;
3559 case BFA_SFP_SCN_POM:
3560 aen_evt = BFA_PORT_AEN_SFP_POM;
3561 aen_entry->aen_data.port.level = rsp->pomlvl;
3562 break;
3563 default:
3564 bfa_trc(sfp, rsp->event);
3565 WARN_ON(1);
3566 }
3567
3568 /* Send the AEN notification */
3569 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3570 BFA_AEN_CAT_PORT, aen_evt);
3571 }
3572
3573 /*
3574 * SFP get data send
3575 */
3576 static void
3577 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3578 {
3579 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3580
3581 bfa_trc(sfp, req->memtype);
3582
3583 /* build host command */
3584 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3585 bfa_ioc_portid(sfp->ioc));
3586
3587 /* send mbox cmd */
3588 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3589 }
3590
3591 /*
3592 * SFP is valid, read sfp data
3593 */
3594 static void
3595 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3596 {
3597 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3598
3599 WARN_ON(sfp->lock != 0);
3600 bfa_trc(sfp, sfp->state);
3601
3602 sfp->lock = 1;
3603 sfp->memtype = memtype;
3604 req->memtype = memtype;
3605
3606 /* Setup SG list */
3607 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3608
3609 bfa_sfp_getdata_send(sfp);
3610 }
3611
3612 /*
3613 * SFP scn handler
3614 */
3615 static void
3616 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3617 {
3618 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3619
3620 switch (rsp->event) {
3621 case BFA_SFP_SCN_INSERTED:
3622 sfp->state = BFA_SFP_STATE_INSERTED;
3623 sfp->data_valid = 0;
3624 bfa_sfp_scn_aen_post(sfp, rsp);
3625 break;
3626 case BFA_SFP_SCN_REMOVED:
3627 sfp->state = BFA_SFP_STATE_REMOVED;
3628 sfp->data_valid = 0;
3629 bfa_sfp_scn_aen_post(sfp, rsp);
3630 break;
3631 case BFA_SFP_SCN_FAILED:
3632 sfp->state = BFA_SFP_STATE_FAILED;
3633 sfp->data_valid = 0;
3634 bfa_sfp_scn_aen_post(sfp, rsp);
3635 break;
3636 case BFA_SFP_SCN_UNSUPPORT:
3637 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3638 bfa_sfp_scn_aen_post(sfp, rsp);
3639 if (!sfp->lock)
3640 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3641 break;
3642 case BFA_SFP_SCN_POM:
3643 bfa_sfp_scn_aen_post(sfp, rsp);
3644 break;
3645 case BFA_SFP_SCN_VALID:
3646 sfp->state = BFA_SFP_STATE_VALID;
3647 if (!sfp->lock)
3648 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3649 break;
3650 default:
3651 bfa_trc(sfp, rsp->event);
3652 WARN_ON(1);
3653 }
3654 }
3655
3656 /*
3657 * SFP show complete
3658 */
3659 static void
3660 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3661 {
3662 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3663
3664 if (!sfp->lock) {
3665 /*
3666 * receiving response after ioc failure
3667 */
3668 bfa_trc(sfp, sfp->lock);
3669 return;
3670 }
3671
3672 bfa_trc(sfp, rsp->status);
3673 if (rsp->status == BFA_STATUS_OK) {
3674 sfp->data_valid = 1;
3675 if (sfp->state == BFA_SFP_STATE_VALID)
3676 sfp->status = BFA_STATUS_OK;
3677 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3678 sfp->status = BFA_STATUS_SFP_UNSUPP;
3679 else
3680 bfa_trc(sfp, sfp->state);
3681 } else {
3682 sfp->data_valid = 0;
3683 sfp->status = rsp->status;
3684 /* sfpshow shouldn't change sfp state */
3685 }
3686
3687 bfa_trc(sfp, sfp->memtype);
3688 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3689 bfa_trc(sfp, sfp->data_valid);
3690 if (sfp->data_valid) {
3691 u32 size = sizeof(struct sfp_mem_s);
3692 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3693 memcpy(des, sfp->dbuf_kva, size);
3694 }
3695 /*
3696 * Queue completion callback.
3697 */
3698 bfa_cb_sfp_show(sfp);
3699 } else
3700 sfp->lock = 0;
3701
3702 bfa_trc(sfp, sfp->state_query_lock);
3703 if (sfp->state_query_lock) {
3704 sfp->state = rsp->state;
3705 /* Complete callback */
3706 bfa_cb_sfp_state_query(sfp);
3707 }
3708 }
3709
3710 /*
3711 * SFP query fw sfp state
3712 */
3713 static void
3714 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3715 {
3716 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3717
3718 /* Should not be doing query if not in _INIT state */
3719 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3720 WARN_ON(sfp->state_query_lock != 0);
3721 bfa_trc(sfp, sfp->state);
3722
3723 sfp->state_query_lock = 1;
3724 req->memtype = 0;
3725
3726 if (!sfp->lock)
3727 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3728 }
3729
3730 static void
3731 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3732 {
3733 enum bfa_defs_sfp_media_e *media = sfp->media;
3734
3735 *media = BFA_SFP_MEDIA_UNKNOWN;
3736
3737 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3738 *media = BFA_SFP_MEDIA_UNSUPPORT;
3739 else if (sfp->state == BFA_SFP_STATE_VALID) {
3740 union sfp_xcvr_e10g_code_u e10g;
3741 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3742 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3743 (sfpmem->srlid_base.xcvr[5] >> 1);
3744
3745 e10g.b = sfpmem->srlid_base.xcvr[0];
3746 bfa_trc(sfp, e10g.b);
3747 bfa_trc(sfp, xmtr_tech);
3748 /* check fc transmitter tech */
3749 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3750 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3751 (xmtr_tech & SFP_XMTR_TECH_CA))
3752 *media = BFA_SFP_MEDIA_CU;
3753 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3754 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3755 *media = BFA_SFP_MEDIA_EL;
3756 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3757 (xmtr_tech & SFP_XMTR_TECH_LC))
3758 *media = BFA_SFP_MEDIA_LW;
3759 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3760 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3761 (xmtr_tech & SFP_XMTR_TECH_SA))
3762 *media = BFA_SFP_MEDIA_SW;
3763 /* Check 10G Ethernet Compliance code */
3764 else if (e10g.r.e10g_sr)
3765 *media = BFA_SFP_MEDIA_SW;
3766 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3767 *media = BFA_SFP_MEDIA_LW;
3768 else if (e10g.r.e10g_unall)
3769 *media = BFA_SFP_MEDIA_UNKNOWN;
3770 else
3771 bfa_trc(sfp, 0);
3772 } else
3773 bfa_trc(sfp, sfp->state);
3774 }
3775
3776 static bfa_status_t
3777 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3778 {
3779 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3780 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3781 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3782 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3783
3784 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3785 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3786 return BFA_STATUS_OK;
3787 else {
3788 bfa_trc(sfp, e10g.b);
3789 return BFA_STATUS_UNSUPP_SPEED;
3790 }
3791 }
3792 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3793 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3794 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3795 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3796 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3797 return BFA_STATUS_OK;
3798 else {
3799 bfa_trc(sfp, portspeed);
3800 bfa_trc(sfp, fc3.b);
3801 bfa_trc(sfp, e10g.b);
3802 return BFA_STATUS_UNSUPP_SPEED;
3803 }
3804 }
3805
3806 /*
3807 * SFP mbox handler
3808 */
3809 void
3810 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3811 {
3812 struct bfa_sfp_s *sfp = sfparg;
3813
3814 switch (msg->mh.msg_id) {
3815 case BFI_SFP_I2H_SHOW:
3816 bfa_sfp_show_comp(sfp, msg);
3817 break;
3818
3819 case BFI_SFP_I2H_SCN:
3820 bfa_sfp_scn(sfp, msg);
3821 break;
3822
3823 default:
3824 bfa_trc(sfp, msg->mh.msg_id);
3825 WARN_ON(1);
3826 }
3827 }
3828
3829 /*
3830 * Return DMA memory needed by sfp module.
3831 */
3832 u32
3833 bfa_sfp_meminfo(void)
3834 {
3835 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3836 }
3837
3838 /*
3839 * Attach virtual and physical memory for SFP.
3840 */
3841 void
3842 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3843 struct bfa_trc_mod_s *trcmod)
3844 {
3845 sfp->dev = dev;
3846 sfp->ioc = ioc;
3847 sfp->trcmod = trcmod;
3848
3849 sfp->cbfn = NULL;
3850 sfp->cbarg = NULL;
3851 sfp->sfpmem = NULL;
3852 sfp->lock = 0;
3853 sfp->data_valid = 0;
3854 sfp->state = BFA_SFP_STATE_INIT;
3855 sfp->state_query_lock = 0;
3856 sfp->state_query_cbfn = NULL;
3857 sfp->state_query_cbarg = NULL;
3858 sfp->media = NULL;
3859 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3860 sfp->is_elb = BFA_FALSE;
3861
3862 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3863 bfa_q_qe_init(&sfp->ioc_notify);
3864 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3865 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3866 }
3867
3868 /*
3869 * Claim Memory for SFP
3870 */
3871 void
3872 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3873 {
3874 sfp->dbuf_kva = dm_kva;
3875 sfp->dbuf_pa = dm_pa;
3876 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3877
3878 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3879 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3880 }
3881
3882 /*
3883 * Show SFP eeprom content
3884 *
3885 * @param[in] sfp - bfa sfp module
3886 *
3887 * @param[out] sfpmem - sfp eeprom data
3888 *
3889 */
3890 bfa_status_t
3891 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3892 bfa_cb_sfp_t cbfn, void *cbarg)
3893 {
3894
3895 if (!bfa_ioc_is_operational(sfp->ioc)) {
3896 bfa_trc(sfp, 0);
3897 return BFA_STATUS_IOC_NON_OP;
3898 }
3899
3900 if (sfp->lock) {
3901 bfa_trc(sfp, 0);
3902 return BFA_STATUS_DEVBUSY;
3903 }
3904
3905 sfp->cbfn = cbfn;
3906 sfp->cbarg = cbarg;
3907 sfp->sfpmem = sfpmem;
3908
3909 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3910 return BFA_STATUS_OK;
3911 }
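
/*
 * A caller sketch (illustrative only): the eeprom contents land in the
 * caller-owned sfpmem buffer once the callback reports BFA_STATUS_OK.
 * my_sfpmem and my_show_done are hypothetical; the callback signature
 * follows bfa_cb_sfp_t.
 *
 *	static void my_show_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct sfp_mem_s *sfpmem = cbarg;
 *		... sfpmem is valid when status == BFA_STATUS_OK ...
 *	}
 *
 *	bfa_sfp_show(sfp, &my_sfpmem, my_show_done, &my_sfpmem);
 */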
3912
3913 /*
3914 * Return SFP Media type
3915 *
3916 * @param[in] sfp - bfa sfp module
3917 *
3918 * @param[out] media - returned SFP media type
3919 *
3920 */
3921 bfa_status_t
3922 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3923 bfa_cb_sfp_t cbfn, void *cbarg)
3924 {
3925 if (!bfa_ioc_is_operational(sfp->ioc)) {
3926 bfa_trc(sfp, 0);
3927 return BFA_STATUS_IOC_NON_OP;
3928 }
3929
3930 sfp->media = media;
3931 if (sfp->state == BFA_SFP_STATE_INIT) {
3932 if (sfp->state_query_lock) {
3933 bfa_trc(sfp, 0);
3934 return BFA_STATUS_DEVBUSY;
3935 } else {
3936 sfp->state_query_cbfn = cbfn;
3937 sfp->state_query_cbarg = cbarg;
3938 bfa_sfp_state_query(sfp);
3939 return BFA_STATUS_SFP_NOT_READY;
3940 }
3941 }
3942
3943 bfa_sfp_media_get(sfp);
3944 return BFA_STATUS_OK;
3945 }
3946
3947 /*
3948 * Check if the user-set port speed is allowed by the SFP
3949 *
3950 * @param[in] sfp - bfa sfp module
3951 * @param[in] portspeed - port speed from user
3952 *
3953 */
3954 bfa_status_t
3955 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3956 bfa_cb_sfp_t cbfn, void *cbarg)
3957 {
3958 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3959
3960 if (!bfa_ioc_is_operational(sfp->ioc))
3961 return BFA_STATUS_IOC_NON_OP;
3962
3963 /* For Mezz cards, all speeds are allowed */
3964 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3965 return BFA_STATUS_OK;
3966
3967 /* Check SFP state */
3968 sfp->portspeed = portspeed;
3969 if (sfp->state == BFA_SFP_STATE_INIT) {
3970 if (sfp->state_query_lock) {
3971 bfa_trc(sfp, 0);
3972 return BFA_STATUS_DEVBUSY;
3973 } else {
3974 sfp->state_query_cbfn = cbfn;
3975 sfp->state_query_cbarg = cbarg;
3976 bfa_sfp_state_query(sfp);
3977 return BFA_STATUS_SFP_NOT_READY;
3978 }
3979 }
3980
3981 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3982 sfp->state == BFA_SFP_STATE_FAILED) {
3983 bfa_trc(sfp, sfp->state);
3984 return BFA_STATUS_NO_SFP_DEV;
3985 }
3986
3987 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3988 bfa_trc(sfp, sfp->state);
3989 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3990 }
3991
3992 /* For eloopback, all speeds are allowed */
3993 if (sfp->is_elb)
3994 return BFA_STATUS_OK;
3995
3996 return bfa_sfp_speed_valid(sfp, portspeed);
3997 }
3998
3999 /*
4000 * Flash module specific
4001 */
4002
4003 /*
4004 * The FLASH DMA buffer should be big enough to hold both the MFG block
4005 * and the ASIC block (64KB) at the same time, and should also be 2KB
4006 * aligned so that a write segment does not cross a sector boundary.
4007 */
4008 #define BFA_FLASH_SEG_SZ 2048
4009 #define BFA_FLASH_DMA_BUF_SZ \
4010 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
4011
4012 static void
4013 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4014 int inst, int type)
4015 {
4016 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4017 struct bfa_aen_entry_s *aen_entry;
4018
4019 bfad_get_aen_entry(bfad, aen_entry);
4020 if (!aen_entry)
4021 return;
4022
4023 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4024 aen_entry->aen_data.audit.partition_inst = inst;
4025 aen_entry->aen_data.audit.partition_type = type;
4026
4027 /* Send the AEN notification */
4028 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4029 BFA_AEN_CAT_AUDIT, event);
4030 }
4031
4032 static void
4033 bfa_flash_cb(struct bfa_flash_s *flash)
4034 {
4035 flash->op_busy = 0;
4036 if (flash->cbfn)
4037 flash->cbfn(flash->cbarg, flash->status);
4038 }
4039
4040 static void
4041 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4042 {
4043 struct bfa_flash_s *flash = cbarg;
4044
4045 bfa_trc(flash, event);
4046 switch (event) {
4047 case BFA_IOC_E_DISABLED:
4048 case BFA_IOC_E_FAILED:
4049 if (flash->op_busy) {
4050 flash->status = BFA_STATUS_IOC_FAILURE;
4051 flash->cbfn(flash->cbarg, flash->status);
4052 flash->op_busy = 0;
4053 }
4054 break;
4055
4056 default:
4057 break;
4058 }
4059 }
4060
4061 /*
4062 * Send flash attribute query request.
4063 *
4064 * @param[in] cbarg - callback argument
4065 */
4066 static void
4067 bfa_flash_query_send(void *cbarg)
4068 {
4069 struct bfa_flash_s *flash = cbarg;
4070 struct bfi_flash_query_req_s *msg =
4071 (struct bfi_flash_query_req_s *) flash->mb.msg;
4072
4073 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4074 bfa_ioc_portid(flash->ioc));
4075 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4076 flash->dbuf_pa);
4077 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4078 }
4079
4080 /*
4081 * Send flash write request.
4082 *
4083 * @param[in] flash - flash structure
4084 */
4085 static void
4086 bfa_flash_write_send(struct bfa_flash_s *flash)
4087 {
4088 struct bfi_flash_write_req_s *msg =
4089 (struct bfi_flash_write_req_s *) flash->mb.msg;
4090 u32 len;
4091
4092 msg->type = cpu_to_be32(flash->type);
4093 msg->instance = flash->instance;
4094 msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4095 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4096 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4097 msg->length = cpu_to_be32(len);
4098
4099 /* indicate if it's the last msg of the whole write operation */
4100 msg->last = (len == flash->residue) ? 1 : 0;
4101
4102 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4103 bfa_ioc_portid(flash->ioc));
4104 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4105 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4106 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4107
4108 flash->residue -= len;
4109 flash->offset += len;
4110 }
4111
4112 /*
4113 * Send flash read request.
4114 *
4115 * @param[in] cbarg - callback argument
4116 */
4117 static void
4118 bfa_flash_read_send(void *cbarg)
4119 {
4120 struct bfa_flash_s *flash = cbarg;
4121 struct bfi_flash_read_req_s *msg =
4122 (struct bfi_flash_read_req_s *) flash->mb.msg;
4123 u32 len;
4124
4125 msg->type = cpu_to_be32(flash->type);
4126 msg->instance = flash->instance;
4127 msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4128 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4129 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4130 msg->length = cpu_to_be32(len);
4131 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4132 bfa_ioc_portid(flash->ioc));
4133 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4134 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4135 }
4136
4137 /*
4138 * Send flash erase request.
4139 *
4140 * @param[in] cbarg - callback argument
4141 */
4142 static void
4143 bfa_flash_erase_send(void *cbarg)
4144 {
4145 struct bfa_flash_s *flash = cbarg;
4146 struct bfi_flash_erase_req_s *msg =
4147 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4148
4149 msg->type = cpu_to_be32(flash->type);
4150 msg->instance = flash->instance;
4151 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4152 bfa_ioc_portid(flash->ioc));
4153 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4154 }
4155
4156 /*
4157 * Process flash response messages upon receiving interrupts.
4158 *
4159 * @param[in] flasharg - flash structure
4160 * @param[in] msg - message structure
4161 */
4162 static void
4163 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4164 {
4165 struct bfa_flash_s *flash = flasharg;
4166 u32 status;
4167
4168 union {
4169 struct bfi_flash_query_rsp_s *query;
4170 struct bfi_flash_erase_rsp_s *erase;
4171 struct bfi_flash_write_rsp_s *write;
4172 struct bfi_flash_read_rsp_s *read;
4173 struct bfi_flash_event_s *event;
4174 struct bfi_mbmsg_s *msg;
4175 } m;
4176
4177 m.msg = msg;
4178 bfa_trc(flash, msg->mh.msg_id);
4179
4180 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4181 /* receiving response after ioc failure */
4182 bfa_trc(flash, 0x9999);
4183 return;
4184 }
4185
4186 switch (msg->mh.msg_id) {
4187 case BFI_FLASH_I2H_QUERY_RSP:
4188 status = be32_to_cpu(m.query->status);
4189 bfa_trc(flash, status);
4190 if (status == BFA_STATUS_OK) {
4191 u32 i;
4192 struct bfa_flash_attr_s *attr, *f;
4193
4194 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4195 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4196 attr->status = be32_to_cpu(f->status);
4197 attr->npart = be32_to_cpu(f->npart);
4198 bfa_trc(flash, attr->status);
4199 bfa_trc(flash, attr->npart);
4200 for (i = 0; i < attr->npart; i++) {
4201 attr->part[i].part_type =
4202 be32_to_cpu(f->part[i].part_type);
4203 attr->part[i].part_instance =
4204 be32_to_cpu(f->part[i].part_instance);
4205 attr->part[i].part_off =
4206 be32_to_cpu(f->part[i].part_off);
4207 attr->part[i].part_size =
4208 be32_to_cpu(f->part[i].part_size);
4209 attr->part[i].part_len =
4210 be32_to_cpu(f->part[i].part_len);
4211 attr->part[i].part_status =
4212 be32_to_cpu(f->part[i].part_status);
4213 }
4214 }
4215 flash->status = status;
4216 bfa_flash_cb(flash);
4217 break;
4218 case BFI_FLASH_I2H_ERASE_RSP:
4219 status = be32_to_cpu(m.erase->status);
4220 bfa_trc(flash, status);
4221 flash->status = status;
4222 bfa_flash_cb(flash);
4223 break;
4224 case BFI_FLASH_I2H_WRITE_RSP:
4225 status = be32_to_cpu(m.write->status);
4226 bfa_trc(flash, status);
4227 if (status != BFA_STATUS_OK || flash->residue == 0) {
4228 flash->status = status;
4229 bfa_flash_cb(flash);
4230 } else {
4231 bfa_trc(flash, flash->offset);
4232 bfa_flash_write_send(flash);
4233 }
4234 break;
4235 case BFI_FLASH_I2H_READ_RSP:
4236 status = be32_to_cpu(m.read->status);
4237 bfa_trc(flash, status);
4238 if (status != BFA_STATUS_OK) {
4239 flash->status = status;
4240 bfa_flash_cb(flash);
4241 } else {
4242 u32 len = be32_to_cpu(m.read->length);
4243 bfa_trc(flash, flash->offset);
4244 bfa_trc(flash, len);
4245 memcpy(flash->ubuf + flash->offset,
4246 flash->dbuf_kva, len);
4247 flash->residue -= len;
4248 flash->offset += len;
4249 if (flash->residue == 0) {
4250 flash->status = status;
4251 bfa_flash_cb(flash);
4252 } else
4253 bfa_flash_read_send(flash);
4254 }
4255 break;
4256 case BFI_FLASH_I2H_BOOT_VER_RSP:
4257 break;
4258 case BFI_FLASH_I2H_EVENT:
4259 status = be32_to_cpu(m.event->status);
4260 bfa_trc(flash, status);
4261 if (status == BFA_STATUS_BAD_FWCFG)
4262 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4263 else if (status == BFA_STATUS_INVALID_VENDOR) {
4264 u32 param;
4265 param = be32_to_cpu(m.event->param);
4266 bfa_trc(flash, param);
4267 bfa_ioc_aen_post(flash->ioc,
4268 BFA_IOC_AEN_INVALID_VENDOR);
4269 }
4270 break;
4271
4272 default:
4273 WARN_ON(1);
4274 }
4275 }
4276
4277 /*
4278 * Flash memory info API.
4279 *
4280 * @param[in] mincfg - minimal cfg variable
4281 */
4282 u32
4283 bfa_flash_meminfo(bfa_boolean_t mincfg)
4284 {
4285 /* min driver doesn't need flash */
4286 if (mincfg)
4287 return 0;
4288 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4289 }
4290
4291 /*
4292 * Flash attach API.
4293 *
4294 * @param[in] flash - flash structure
4295 * @param[in] ioc - ioc structure
4296 * @param[in] dev - device structure
4297 * @param[in] trcmod - trace module
4298 * @param[in] logmod - log module
4299 */
4300 void
4301 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4302 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4303 {
4304 flash->ioc = ioc;
4305 flash->trcmod = trcmod;
4306 flash->cbfn = NULL;
4307 flash->cbarg = NULL;
4308 flash->op_busy = 0;
4309
4310 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4311 bfa_q_qe_init(&flash->ioc_notify);
4312 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4313 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4314
4315 /* min driver doesn't need flash */
4316 if (mincfg) {
4317 flash->dbuf_kva = NULL;
4318 flash->dbuf_pa = 0;
4319 }
4320 }
4321
4322 /*
4323 * Claim memory for flash
4324 *
4325 * @param[in] flash - flash structure
4326 * @param[in] dm_kva - pointer to virtual memory address
4327 * @param[in] dm_pa - physical memory address
4328 * @param[in] mincfg - minimal cfg variable
4329 */
4330 void
4331 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4332 bfa_boolean_t mincfg)
4333 {
4334 if (mincfg)
4335 return;
4336
4337 flash->dbuf_kva = dm_kva;
4338 flash->dbuf_pa = dm_pa;
4339 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4340 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4341 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4342 }
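
/*
 * Usage sketch (hypothetical caller; names are illustrative): the DMA
 * staging area is sized by bfa_flash_meminfo() and handed over with
 * bfa_flash_memclaim() at attach time, before any flash request:
 *
 *	u32 sz = bfa_flash_meminfo(BFA_FALSE);
 *	... allocate sz bytes of DMA-able memory as (kva, pa) ...
 *	bfa_flash_attach(flash, ioc, dev, trcmod, BFA_FALSE);
 *	bfa_flash_memclaim(flash, kva, pa, BFA_FALSE);
 */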
4343
4344 /*
4345 * Get flash attribute.
4346 *
4347 * @param[in] flash - flash structure
4348 * @param[in] attr - flash attribute structure
4349 * @param[in] cbfn - callback function
4350 * @param[in] cbarg - callback argument
4351 *
4352 * Return status.
4353 */
4354 bfa_status_t
4355 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4356 bfa_cb_flash_t cbfn, void *cbarg)
4357 {
4358 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4359
4360 if (!bfa_ioc_is_operational(flash->ioc))
4361 return BFA_STATUS_IOC_NON_OP;
4362
4363 if (flash->op_busy) {
4364 bfa_trc(flash, flash->op_busy);
4365 return BFA_STATUS_DEVBUSY;
4366 }
4367
4368 flash->op_busy = 1;
4369 flash->cbfn = cbfn;
4370 flash->cbarg = cbarg;
4371 flash->ubuf = (u8 *) attr;
4372 bfa_flash_query_send(flash);
4373
4374 return BFA_STATUS_OK;
4375 }
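
/*
 * Usage sketch (hypothetical caller): the flash APIs are asynchronous
 * and report completion through the bfa_cb_flash_t callback:
 *
 *	static void my_flash_cb(void *cbarg, bfa_status_t status)
 *	{
 *		... inspect the bfa_flash_attr_s filled in by the query ...
 *	}
 *
 *	struct bfa_flash_attr_s attr;
 *	if (bfa_flash_get_attr(flash, &attr, my_flash_cb, NULL) !=
 *	    BFA_STATUS_OK)
 *		... rejected synchronously; no callback will fire ...
 */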
4376
4377 /*
4378 * Erase flash partition.
4379 *
4380 * @param[in] flash - flash structure
4381 * @param[in] type - flash partition type
4382 * @param[in] instance - flash partition instance
4383 * @param[in] cbfn - callback function
4384 * @param[in] cbarg - callback argument
4385 *
4386 * Return status.
4387 */
4388 bfa_status_t
4389 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4390 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4391 {
4392 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4393 bfa_trc(flash, type);
4394 bfa_trc(flash, instance);
4395
4396 if (!bfa_ioc_is_operational(flash->ioc))
4397 return BFA_STATUS_IOC_NON_OP;
4398
4399 if (flash->op_busy) {
4400 bfa_trc(flash, flash->op_busy);
4401 return BFA_STATUS_DEVBUSY;
4402 }
4403
4404 flash->op_busy = 1;
4405 flash->cbfn = cbfn;
4406 flash->cbarg = cbarg;
4407 flash->type = type;
4408 flash->instance = instance;
4409
4410 bfa_flash_erase_send(flash);
4411 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4412 instance, type);
4413 return BFA_STATUS_OK;
4414 }
4415
4416 /*
4417 * Update flash partition.
4418 *
4419 * @param[in] flash - flash structure
4420 * @param[in] type - flash partition type
4421 * @param[in] instance - flash partition instance
4422 * @param[in] buf - update data buffer
4423 * @param[in] len - data buffer length
4424 * @param[in] offset - offset relative to the partition starting address
4425 * @param[in] cbfn - callback function
4426 * @param[in] cbarg - callback argument
4427 *
4428 * Return status.
4429 */
4430 bfa_status_t
4431 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4432 u8 instance, void *buf, u32 len, u32 offset,
4433 bfa_cb_flash_t cbfn, void *cbarg)
4434 {
4435 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4436 bfa_trc(flash, type);
4437 bfa_trc(flash, instance);
4438 bfa_trc(flash, len);
4439 bfa_trc(flash, offset);
4440
4441 if (!bfa_ioc_is_operational(flash->ioc))
4442 return BFA_STATUS_IOC_NON_OP;
4443
4444 /*
4445 * 'len' must be on a word (4-byte) boundary
4446 * 'offset' must be on a sector (16KB) boundary
4447 */
4448 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4449 return BFA_STATUS_FLASH_BAD_LEN;
4450
4451 if (type == BFA_FLASH_PART_MFG)
4452 return BFA_STATUS_EINVAL;
4453
4454 if (flash->op_busy) {
4455 bfa_trc(flash, flash->op_busy);
4456 return BFA_STATUS_DEVBUSY;
4457 }
4458
4459 flash->op_busy = 1;
4460 flash->cbfn = cbfn;
4461 flash->cbarg = cbarg;
4462 flash->type = type;
4463 flash->instance = instance;
4464 flash->residue = len;
4465 flash->offset = 0;
4466 flash->addr_off = offset;
4467 flash->ubuf = buf;
4468
4469 bfa_flash_write_send(flash);
4470 return BFA_STATUS_OK;
4471 }
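
/*
 * Worked example for the checks above: len = 0x1004, offset = 0x8000
 * passes (0x1004 & 0x3 == 0 and 0x8000 & 0x3FFF == 0), while
 * offset = 0x8100 fails with BFA_STATUS_FLASH_BAD_LEN since
 * 0x8100 & 0x3FFF == 0x100.
 */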
4472
4473 /*
4474 * Read flash partition.
4475 *
4476 * @param[in] flash - flash structure
4477 * @param[in] type - flash partition type
4478 * @param[in] instance - flash partition instance
4479 * @param[in] buf - read data buffer
4480 * @param[in] len - data buffer length
4481 * @param[in] offset - offset relative to the partition starting address
4482 * @param[in] cbfn - callback function
4483 * @param[in] cbarg - callback argument
4484 *
4485 * Return status.
4486 */
4487 bfa_status_t
4488 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4489 u8 instance, void *buf, u32 len, u32 offset,
4490 bfa_cb_flash_t cbfn, void *cbarg)
4491 {
4492 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4493 bfa_trc(flash, type);
4494 bfa_trc(flash, instance);
4495 bfa_trc(flash, len);
4496 bfa_trc(flash, offset);
4497
4498 if (!bfa_ioc_is_operational(flash->ioc))
4499 return BFA_STATUS_IOC_NON_OP;
4500
4501 /*
4502 * 'len' must be on a word (4-byte) boundary
4503 * 'offset' must be on a sector (16KB) boundary
4504 */
4505 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4506 return BFA_STATUS_FLASH_BAD_LEN;
4507
4508 if (flash->op_busy) {
4509 bfa_trc(flash, flash->op_busy);
4510 return BFA_STATUS_DEVBUSY;
4511 }
4512
4513 flash->op_busy = 1;
4514 flash->cbfn = cbfn;
4515 flash->cbarg = cbarg;
4516 flash->type = type;
4517 flash->instance = instance;
4518 flash->residue = len;
4519 flash->offset = 0;
4520 flash->addr_off = offset;
4521 flash->ubuf = buf;
4522 bfa_flash_read_send(flash);
4523
4524 return BFA_STATUS_OK;
4525 }
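
/*
 * Note: reads larger than the DMA staging buffer are split
 * transparently; bfa_flash_intr() copies each BFI_FLASH_I2H_READ_RSP
 * chunk to ubuf + offset and re-issues bfa_flash_read_send() until
 * residue reaches zero. With an 8KB staging buffer, for example, a
 * 20KB read completes after three firmware round trips (8K + 8K + 4K).
 */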
4526
4527 /*
4528 * DIAG module specific
4529 */
4530
4531 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4532 #define BFA_DIAG_FWPING_TOV 1000 /* msec */
4533
4534 /* IOC event handler */
4535 static void
4536 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4537 {
4538 struct bfa_diag_s *diag = diag_arg;
4539
4540 bfa_trc(diag, event);
4541 bfa_trc(diag, diag->block);
4542 bfa_trc(diag, diag->fwping.lock);
4543 bfa_trc(diag, diag->tsensor.lock);
4544
4545 switch (event) {
4546 case BFA_IOC_E_DISABLED:
4547 case BFA_IOC_E_FAILED:
4548 if (diag->fwping.lock) {
4549 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4550 diag->fwping.cbfn(diag->fwping.cbarg,
4551 diag->fwping.status);
4552 diag->fwping.lock = 0;
4553 }
4554
4555 if (diag->tsensor.lock) {
4556 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4557 diag->tsensor.cbfn(diag->tsensor.cbarg,
4558 diag->tsensor.status);
4559 diag->tsensor.lock = 0;
4560 }
4561
4562 if (diag->block) {
4563 if (diag->timer_active) {
4564 bfa_timer_stop(&diag->timer);
4565 diag->timer_active = 0;
4566 }
4567
4568 diag->status = BFA_STATUS_IOC_FAILURE;
4569 diag->cbfn(diag->cbarg, diag->status);
4570 diag->block = 0;
4571 }
4572 break;
4573
4574 default:
4575 break;
4576 }
4577 }
4578
4579 static void
4580 bfa_diag_memtest_done(void *cbarg)
4581 {
4582 struct bfa_diag_s *diag = cbarg;
4583 struct bfa_ioc_s *ioc = diag->ioc;
4584 struct bfa_diag_memtest_result *res = diag->result;
4585 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4586 u32 pgnum, pgoff, i;
4587
4588 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4589 pgoff = PSS_SMEM_PGOFF(loff);
4590
4591 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4592
4593 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4594 sizeof(u32)); i++) {
4595 /* read test result from smem */
4596 *((u32 *) res + i) =
4597 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4598 loff += sizeof(u32);
4599 }
4600
4601 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4602 bfa_ioc_reset_fwstate(ioc);
4603
4604 res->status = swab32(res->status);
4605 bfa_trc(diag, res->status);
4606
4607 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4608 diag->status = BFA_STATUS_OK;
4609 else {
4610 diag->status = BFA_STATUS_MEMTEST_FAILED;
4611 res->addr = swab32(res->addr);
4612 res->exp = swab32(res->exp);
4613 res->act = swab32(res->act);
4614 res->err_status = swab32(res->err_status);
4615 res->err_status1 = swab32(res->err_status1);
4616 res->err_addr = swab32(res->err_addr);
4617 bfa_trc(diag, res->addr);
4618 bfa_trc(diag, res->exp);
4619 bfa_trc(diag, res->act);
4620 bfa_trc(diag, res->err_status);
4621 bfa_trc(diag, res->err_status1);
4622 bfa_trc(diag, res->err_addr);
4623 }
4624 diag->timer_active = 0;
4625 diag->cbfn(diag->cbarg, diag->status);
4626 diag->block = 0;
4627 }
4628
4629 /*
4630 * Firmware ping
4631 */
4632
4633 /*
4634 * Perform DMA test directly
4635 */
4636 static void
4637 diag_fwping_send(struct bfa_diag_s *diag)
4638 {
4639 struct bfi_diag_fwping_req_s *fwping_req;
4640 u32 i;
4641
4642 bfa_trc(diag, diag->fwping.dbuf_pa);
4643
4644 /* fill DMA area with pattern */
4645 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4646 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4647
4648 /* Fill mbox msg */
4649 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4650
4651 /* Setup SG list */
4652 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4653 diag->fwping.dbuf_pa);
4654 /* Set up dma count */
4655 fwping_req->count = cpu_to_be32(diag->fwping.count);
4656 /* Set up data pattern */
4657 fwping_req->data = diag->fwping.data;
4658
4659 /* build host command */
4660 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4661 bfa_ioc_portid(diag->ioc));
4662
4663 /* send mbox cmd */
4664 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4665 }
4666
4667 static void
4668 diag_fwping_comp(struct bfa_diag_s *diag,
4669 struct bfi_diag_fwping_rsp_s *diag_rsp)
4670 {
4671 u32 rsp_data = diag_rsp->data;
4672 u8 rsp_dma_status = diag_rsp->dma_status;
4673
4674 bfa_trc(diag, rsp_data);
4675 bfa_trc(diag, rsp_dma_status);
4676
4677 if (rsp_dma_status == BFA_STATUS_OK) {
4678 u32 i, pat;
4679 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4680 diag->fwping.data;
4681 /* Check mbox data */
4682 if (diag->fwping.data != rsp_data) {
4683 bfa_trc(diag, rsp_data);
4684 diag->fwping.result->dmastatus =
4685 BFA_STATUS_DATACORRUPTED;
4686 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4687 diag->fwping.cbfn(diag->fwping.cbarg,
4688 diag->fwping.status);
4689 diag->fwping.lock = 0;
4690 return;
4691 }
4692 /* Check dma pattern */
4693 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4694 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4695 bfa_trc(diag, i);
4696 bfa_trc(diag, pat);
4697 bfa_trc(diag,
4698 *((u32 *)diag->fwping.dbuf_kva + i));
4699 diag->fwping.result->dmastatus =
4700 BFA_STATUS_DATACORRUPTED;
4701 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4702 diag->fwping.cbfn(diag->fwping.cbarg,
4703 diag->fwping.status);
4704 diag->fwping.lock = 0;
4705 return;
4706 }
4707 }
4708 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4709 diag->fwping.status = BFA_STATUS_OK;
4710 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4711 diag->fwping.lock = 0;
4712 } else {
4713 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4714 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4715 diag->fwping.lock = 0;
4716 }
4717 }
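
/*
 * Note on the pattern check above: pat is the complement of the
 * original data when count is odd and the data itself when count is
 * even, which suggests the firmware inverts the DMA pattern on every
 * ping (the firmware side is not visible in this file).
 */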
4718
4719 /*
4720 * Temperature Sensor
4721 */
4722
4723 static void
4724 diag_tempsensor_send(struct bfa_diag_s *diag)
4725 {
4726 struct bfi_diag_ts_req_s *msg;
4727
4728 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4729 bfa_trc(diag, msg->temp);
4730 /* build host command */
4731 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4732 bfa_ioc_portid(diag->ioc));
4733 /* send mbox cmd */
4734 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4735 }
4736
4737 static void
4738 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4739 {
4740 if (!diag->tsensor.lock) {
4741 /* receiving response after ioc failure */
4742 bfa_trc(diag, diag->tsensor.lock);
4743 return;
4744 }
4745
4746 /*
4747 * Reading the ASIC junction tempsensor is a plain register read,
4748 * so it always returns OK
4749 */
4750 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4751 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4752 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4753 diag->tsensor.temp->status = BFA_STATUS_OK;
4754
4755 if (rsp->ts_brd) {
4756 if (rsp->status == BFA_STATUS_OK) {
4757 diag->tsensor.temp->brd_temp =
4758 be16_to_cpu(rsp->brd_temp);
4759 } else {
4760 bfa_trc(diag, rsp->status);
4761 diag->tsensor.temp->brd_temp = 0;
4762 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4763 }
4764 }
4765 bfa_trc(diag, rsp->ts_junc);
4766 bfa_trc(diag, rsp->temp);
4767 bfa_trc(diag, rsp->ts_brd);
4768 bfa_trc(diag, rsp->brd_temp);
4769 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4770 diag->tsensor.lock = 0;
4771 }
4772
4773 /*
4774 * LED Test command
4775 */
4776 static void
4777 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4778 {
4779 struct bfi_diag_ledtest_req_s *msg;
4780
4781 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4782 /* build host command */
4783 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4784 bfa_ioc_portid(diag->ioc));
4785
4786 /*
4787 * Convert the freq from N blinks per 10 sec to the
4788 * crossbow on-time value. We do it here because division is needed.
4789 */
4790 if (ledtest->freq)
4791 ledtest->freq = 500 / ledtest->freq;
4792
4793 if (ledtest->freq == 0)
4794 ledtest->freq = 1;
4795
4796 bfa_trc(diag, ledtest->freq);
4798 msg->cmd = (u8) ledtest->cmd;
4799 msg->color = (u8) ledtest->color;
4800 msg->portid = bfa_ioc_portid(diag->ioc);
4801 msg->led = ledtest->led;
4802 msg->freq = cpu_to_be16(ledtest->freq);
4803
4804 /* send mbox cmd */
4805 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4806 }
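
/*
 * Worked example for the conversion above: a request for 10 blinks
 * per 10 seconds becomes 500 / 10 = 50 on-time units; anything above
 * 500 blinks per 10 seconds divides to 0 and is clamped to the
 * minimum on-time of 1.
 */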
4807
4808 static void
4809 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4810 {
4811 bfa_trc(diag, diag->ledtest.lock);
4812 diag->ledtest.lock = BFA_FALSE;
4813 /* no bfa_cb_queue is needed because driver is not waiting */
4814 }
4815
4816 /*
4817 * Port beaconing
4818 */
4819 static void
4820 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4821 {
4822 struct bfi_diag_portbeacon_req_s *msg;
4823
4824 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4825 /* build host command */
4826 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4827 bfa_ioc_portid(diag->ioc));
4828 msg->beacon = beacon;
4829 msg->period = cpu_to_be32(sec);
4830 /* send mbox cmd */
4831 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4832 }
4833
4834 static void
4835 diag_portbeacon_comp(struct bfa_diag_s *diag)
4836 {
4837 bfa_trc(diag, diag->beacon.state);
4838 diag->beacon.state = BFA_FALSE;
4839 if (diag->cbfn_beacon)
4840 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4841 }
4842
4843 /*
4844 * Diag mailbox message handler
4845 */
4846 void
4847 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4848 {
4849 struct bfa_diag_s *diag = diagarg;
4850
4851 switch (msg->mh.msg_id) {
4852 case BFI_DIAG_I2H_PORTBEACON:
4853 diag_portbeacon_comp(diag);
4854 break;
4855 case BFI_DIAG_I2H_FWPING:
4856 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4857 break;
4858 case BFI_DIAG_I2H_TEMPSENSOR:
4859 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4860 break;
4861 case BFI_DIAG_I2H_LEDTEST:
4862 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4863 break;
4864 default:
4865 bfa_trc(diag, msg->mh.msg_id);
4866 WARN_ON(1);
4867 }
4868 }
4869
4870 /*
4871 * Gen RAM Test
4872 *
4873 * @param[in] *diag - diag data struct
4874 * @param[in] *memtest - mem test parameters input from upper layer
4875 * @param[in] pattern - mem test pattern
4876 * @param[in] *result - mem test result
4877 * @param[in] cbfn - mem test callback function
4878 * @param[in] cbarg - callback function arg
4879 *
4880 * @param[out]
4881 */
4882 bfa_status_t
4883 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4884 u32 pattern, struct bfa_diag_memtest_result *result,
4885 bfa_cb_diag_t cbfn, void *cbarg)
4886 {
4887 bfa_trc(diag, pattern);
4888
4889 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4890 return BFA_STATUS_ADAPTER_ENABLED;
4891
4892 /* check to see if there is another destructive diag cmd running */
4893 if (diag->block) {
4894 bfa_trc(diag, diag->block);
4895 return BFA_STATUS_DEVBUSY;
4896 } else
4897 diag->block = 1;
4898
4899 diag->result = result;
4900 diag->cbfn = cbfn;
4901 diag->cbarg = cbarg;
4902
4903 /* download memtest code and take LPU0 out of reset */
4904 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4905
4906 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4907 bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4908 diag->timer_active = 1;
4909 return BFA_STATUS_OK;
4910 }
4911
4912 /*
4913 * DIAG firmware ping command
4914 *
4915 * @param[in] *diag - diag data struct
4916 * @param[in] cnt - dma loop count for testing PCIE
4917 * @param[in] data - data pattern to pass in fw
4918 * @param[in] *result - pointer to struct bfa_diag_results_fwping
4919 * @param[in] cbfn - callback function
4920 * @param[in] *cbarg - callback function arg
4921 *
4922 * @param[out]
4923 */
4924 bfa_status_t
4925 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4926 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4927 void *cbarg)
4928 {
4929 bfa_trc(diag, cnt);
4930 bfa_trc(diag, data);
4931
4932 if (!bfa_ioc_is_operational(diag->ioc))
4933 return BFA_STATUS_IOC_NON_OP;
4934
4935 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4936 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4937 return BFA_STATUS_CMD_NOTSUPP;
4938
4939 /* check to see if there is another destructive diag cmd running */
4940 if (diag->block || diag->fwping.lock) {
4941 bfa_trc(diag, diag->block);
4942 bfa_trc(diag, diag->fwping.lock);
4943 return BFA_STATUS_DEVBUSY;
4944 }
4945
4946 /* Initialization */
4947 diag->fwping.lock = 1;
4948 diag->fwping.cbfn = cbfn;
4949 diag->fwping.cbarg = cbarg;
4950 diag->fwping.result = result;
4951 diag->fwping.data = data;
4952 diag->fwping.count = cnt;
4953
4954 /* Init test results */
4955 diag->fwping.result->data = 0;
4956 diag->fwping.result->status = BFA_STATUS_OK;
4957
4958 /* kick off the first ping */
4959 diag_fwping_send(diag);
4960 return BFA_STATUS_OK;
4961 }
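
/*
 * Usage sketch (hypothetical caller; names are illustrative):
 *
 *	static void my_diag_cb(void *cbarg, bfa_status_t status)
 *	{
 *		... BFA_STATUS_OK only if every ping and the DMA
 *		    pattern check passed ...
 *	}
 *
 *	struct bfa_diag_results_fwping res;
 *	bfa_diag_fwping(diag, 10, 0xA5A5A5A5, &res, my_diag_cb, NULL);
 */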
4962
4963 /*
4964 * Read Temperature Sensor
4965 *
4966 * @param[in] *diag - diag data struct
4967 * @param[in] *result - pointer to struct bfa_diag_results_tempsensor_s
4968 * @param[in] cbfn - callback function
4969 * @param[in] *cbarg - callback function arg
4970 *
4971 * @param[out]
4972 */
4973 bfa_status_t
4974 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4975 struct bfa_diag_results_tempsensor_s *result,
4976 bfa_cb_diag_t cbfn, void *cbarg)
4977 {
4978 /* check to see if there is a destructive diag cmd running */
4979 if (diag->block || diag->tsensor.lock) {
4980 bfa_trc(diag, diag->block);
4981 bfa_trc(diag, diag->tsensor.lock);
4982 return BFA_STATUS_DEVBUSY;
4983 }
4984
4985 if (!bfa_ioc_is_operational(diag->ioc))
4986 return BFA_STATUS_IOC_NON_OP;
4987
4988 /* Init diag mod params */
4989 diag->tsensor.lock = 1;
4990 diag->tsensor.temp = result;
4991 diag->tsensor.cbfn = cbfn;
4992 diag->tsensor.cbarg = cbarg;
4993
4994 /* Send msg to fw */
4995 diag_tempsensor_send(diag);
4996
4997 return BFA_STATUS_OK;
4998 }
4999
5000 /*
5001 * LED Test command
5002 *
5003 * @param[in] *diag - diag data struct
5004 * @param[in] *ledtest - pointer to ledtest data structure
5005 *
5006 * @param[out]
5007 */
5008 bfa_status_t
5009 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5010 {
5011 bfa_trc(diag, ledtest->cmd);
5012
5013 if (!bfa_ioc_is_operational(diag->ioc))
5014 return BFA_STATUS_IOC_NON_OP;
5015
5016 if (diag->beacon.state)
5017 return BFA_STATUS_BEACON_ON;
5018
5019 if (diag->ledtest.lock)
5020 return BFA_STATUS_LEDTEST_OP;
5021
5022 /* Send msg to fw */
5023 diag->ledtest.lock = BFA_TRUE;
5024 diag_ledtest_send(diag, ledtest);
5025
5026 return BFA_STATUS_OK;
5027 }
5028
5029 /*
5030 * Port beaconing command
5031 *
5032 * @param[in] *diag - diag data struct
5033 * @param[in] beacon - port beaconing 1:ON 0:OFF
5034 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5035 * @param[in] sec - beaconing duration in seconds
5036 *
5037 * @param[out]
5038 */
5039 bfa_status_t
5040 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5041 bfa_boolean_t link_e2e_beacon, uint32_t sec)
5042 {
5043 bfa_trc(diag, beacon);
5044 bfa_trc(diag, link_e2e_beacon);
5045 bfa_trc(diag, sec);
5046
5047 if (!bfa_ioc_is_operational(diag->ioc))
5048 return BFA_STATUS_IOC_NON_OP;
5049
5050 if (diag->ledtest.lock)
5051 return BFA_STATUS_LEDTEST_OP;
5052
5053 if (diag->beacon.state && beacon) /* beacon already on */
5054 return BFA_STATUS_BEACON_ON;
5055
5056 diag->beacon.state = beacon;
5057 diag->beacon.link_e2e = link_e2e_beacon;
5058 if (diag->cbfn_beacon)
5059 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5060
5061 /* Send msg to fw */
5062 diag_portbeacon_send(diag, beacon, sec);
5063
5064 return BFA_STATUS_OK;
5065 }
5066
5067 /*
5068 * Return DMA memory needed by diag module.
5069 */
5070 u32
5071 bfa_diag_meminfo(void)
5072 {
5073 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5074 }
5075
5076 /*
5077 * Attach virtual and physical memory for Diag.
5078 */
5079 void
5080 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5081 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5082 {
5083 diag->dev = dev;
5084 diag->ioc = ioc;
5085 diag->trcmod = trcmod;
5086
5087 diag->block = 0;
5088 diag->cbfn = NULL;
5089 diag->cbarg = NULL;
5090 diag->result = NULL;
5091 diag->cbfn_beacon = cbfn_beacon;
5092
5093 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5094 bfa_q_qe_init(&diag->ioc_notify);
5095 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5096 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5097 }
5098
5099 void
5100 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5101 {
5102 diag->fwping.dbuf_kva = dm_kva;
5103 diag->fwping.dbuf_pa = dm_pa;
5104 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5105 }
5106
5107 /*
5108 * PHY module specific
5109 */
5110 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5111 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5112
5113 static void
5114 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5115 {
5116 int i, m = sz >> 2;
5117
5118 for (i = 0; i < m; i++)
5119 obuf[i] = be32_to_cpu(ibuf[i]);
5120 }
5121
5122 static bfa_boolean_t
5123 bfa_phy_present(struct bfa_phy_s *phy)
5124 {
5125 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5126 }
5127
5128 static void
5129 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5130 {
5131 struct bfa_phy_s *phy = cbarg;
5132
5133 bfa_trc(phy, event);
5134
5135 switch (event) {
5136 case BFA_IOC_E_DISABLED:
5137 case BFA_IOC_E_FAILED:
5138 if (phy->op_busy) {
5139 phy->status = BFA_STATUS_IOC_FAILURE;
5140 phy->cbfn(phy->cbarg, phy->status);
5141 phy->op_busy = 0;
5142 }
5143 break;
5144
5145 default:
5146 break;
5147 }
5148 }
5149
5150 /*
5151 * Send phy attribute query request.
5152 *
5153 * @param[in] cbarg - callback argument
5154 */
5155 static void
5156 bfa_phy_query_send(void *cbarg)
5157 {
5158 struct bfa_phy_s *phy = cbarg;
5159 struct bfi_phy_query_req_s *msg =
5160 (struct bfi_phy_query_req_s *) phy->mb.msg;
5161
5162 msg->instance = phy->instance;
5163 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5164 bfa_ioc_portid(phy->ioc));
5165 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5166 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5167 }
5168
5169 /*
5170 * Send phy write request.
5171 *
5172 * @param[in] cbarg - callback argument
5173 */
5174 static void
5175 bfa_phy_write_send(void *cbarg)
5176 {
5177 struct bfa_phy_s *phy = cbarg;
5178 struct bfi_phy_write_req_s *msg =
5179 (struct bfi_phy_write_req_s *) phy->mb.msg;
5180 u32 len;
5181 u16 *buf, *dbuf;
5182 int i, sz;
5183
5184 msg->instance = phy->instance;
5185 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5186 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5187 phy->residue : BFA_PHY_DMA_BUF_SZ;
5188 msg->length = cpu_to_be32(len);
5189
5190 /* indicate if it's the last msg of the whole write operation */
5191 msg->last = (len == phy->residue) ? 1 : 0;
5192
5193 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5194 bfa_ioc_portid(phy->ioc));
5195 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5196
5197 buf = (u16 *) (phy->ubuf + phy->offset);
5198 dbuf = (u16 *)phy->dbuf_kva;
5199 sz = len >> 1;
5200 for (i = 0; i < sz; i++)
5201 dbuf[i] = cpu_to_be16(buf[i]); /* stage caller data into the DMA buffer */
5202
5203 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5204
5205 phy->residue -= len;
5206 phy->offset += len;
5207 }
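
/*
 * Note: phy writes are chunked by the 8KB BFA_PHY_DMA_BUF_SZ staging
 * buffer and msg->last tags the final chunk; a 20KB image, for
 * example, goes out as 8K + 8K + 4K with only the 4K request carrying
 * last = 1.
 */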
5208
5209 /*
5210 * Send phy read request.
5211 *
5212 * @param[in] cbarg - callback argument
5213 */
5214 static void
5215 bfa_phy_read_send(void *cbarg)
5216 {
5217 struct bfa_phy_s *phy = cbarg;
5218 struct bfi_phy_read_req_s *msg =
5219 (struct bfi_phy_read_req_s *) phy->mb.msg;
5220 u32 len;
5221
5222 msg->instance = phy->instance;
5223 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5224 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5225 phy->residue : BFA_PHY_DMA_BUF_SZ;
5226 msg->length = cpu_to_be32(len);
5227 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5228 bfa_ioc_portid(phy->ioc));
5229 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5230 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5231 }
5232
5233 /*
5234 * Send phy stats request.
5235 *
5236 * @param[in] cbarg - callback argument
5237 */
5238 static void
5239 bfa_phy_stats_send(void *cbarg)
5240 {
5241 struct bfa_phy_s *phy = cbarg;
5242 struct bfi_phy_stats_req_s *msg =
5243 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5244
5245 msg->instance = phy->instance;
5246 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5247 bfa_ioc_portid(phy->ioc));
5248 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5249 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5250 }
5251
5252 /*
5253 * Phy memory info API.
5254 *
5255 * @param[in] mincfg - minimal cfg variable
5256 */
5257 u32
5258 bfa_phy_meminfo(bfa_boolean_t mincfg)
5259 {
5260 /* min driver doesn't need phy */
5261 if (mincfg)
5262 return 0;
5263
5264 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5265 }
5266
5267 /*
5268 * Phy attach API.
5269 *
5270 * @param[in] phy - phy structure
5271 * @param[in] ioc - ioc structure
5272 * @param[in] dev - device structure
5273 * @param[in] trcmod - trace module
5274 * @param[in] mincfg - minimal cfg variable
5275 */
5276 void
5277 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5278 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5279 {
5280 phy->ioc = ioc;
5281 phy->trcmod = trcmod;
5282 phy->cbfn = NULL;
5283 phy->cbarg = NULL;
5284 phy->op_busy = 0;
5285
5286 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5287 bfa_q_qe_init(&phy->ioc_notify);
5288 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5289 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5290
5291 /* min driver doesn't need phy */
5292 if (mincfg) {
5293 phy->dbuf_kva = NULL;
5294 phy->dbuf_pa = 0;
5295 }
5296 }
5297
5298 /*
5299 * Claim memory for phy
5300 *
5301 * @param[in] phy - phy structure
5302 * @param[in] dm_kva - pointer to virtual memory address
5303 * @param[in] dm_pa - physical memory address
5304 * @param[in] mincfg - minimal cfg variable
5305 */
5306 void
5307 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5308 bfa_boolean_t mincfg)
5309 {
5310 if (mincfg)
5311 return;
5312
5313 phy->dbuf_kva = dm_kva;
5314 phy->dbuf_pa = dm_pa;
5315 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5316 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5317 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5318 }
5319
5320 bfa_boolean_t
5321 bfa_phy_busy(struct bfa_ioc_s *ioc)
5322 {
5323 void __iomem *rb;
5324
5325 rb = bfa_ioc_bar0(ioc);
5326 return readl(rb + BFA_PHY_LOCK_STATUS);
5327 }
5328
5329 /*
5330 * Get phy attribute.
5331 *
5332 * @param[in] phy - phy structure
5333 * @param[in] attr - phy attribute structure
5334 * @param[in] cbfn - callback function
5335 * @param[in] cbarg - callback argument
5336 *
5337 * Return status.
5338 */
5339 bfa_status_t
5340 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5341 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5342 {
5343 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5344 bfa_trc(phy, instance);
5345
5346 if (!bfa_phy_present(phy))
5347 return BFA_STATUS_PHY_NOT_PRESENT;
5348
5349 if (!bfa_ioc_is_operational(phy->ioc))
5350 return BFA_STATUS_IOC_NON_OP;
5351
5352 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5353 bfa_trc(phy, phy->op_busy);
5354 return BFA_STATUS_DEVBUSY;
5355 }
5356
5357 phy->op_busy = 1;
5358 phy->cbfn = cbfn;
5359 phy->cbarg = cbarg;
5360 phy->instance = instance;
5361 phy->ubuf = (uint8_t *) attr;
5362 bfa_phy_query_send(phy);
5363
5364 return BFA_STATUS_OK;
5365 }
5366
5367 /*
5368 * Get phy stats.
5369 *
5370 * @param[in] phy - phy structure
5371 * @param[in] instance - phy image instance
5372 * @param[in] stats - pointer to phy stats
5373 * @param[in] cbfn - callback function
5374 * @param[in] cbarg - callback argument
5375 *
5376 * Return status.
5377 */
5378 bfa_status_t
5379 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5380 struct bfa_phy_stats_s *stats,
5381 bfa_cb_phy_t cbfn, void *cbarg)
5382 {
5383 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5384 bfa_trc(phy, instance);
5385
5386 if (!bfa_phy_present(phy))
5387 return BFA_STATUS_PHY_NOT_PRESENT;
5388
5389 if (!bfa_ioc_is_operational(phy->ioc))
5390 return BFA_STATUS_IOC_NON_OP;
5391
5392 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5393 bfa_trc(phy, phy->op_busy);
5394 return BFA_STATUS_DEVBUSY;
5395 }
5396
5397 phy->op_busy = 1;
5398 phy->cbfn = cbfn;
5399 phy->cbarg = cbarg;
5400 phy->instance = instance;
5401 phy->ubuf = (u8 *) stats;
5402 bfa_phy_stats_send(phy);
5403
5404 return BFA_STATUS_OK;
5405 }
5406
5407 /*
5408 * Update phy image.
5409 *
5410 * @param[in] phy - phy structure
5411 * @param[in] instance - phy image instance
5412 * @param[in] buf - update data buffer
5413 * @param[in] len - data buffer length
5414 * @param[in] offset - offset relative to starting address
5415 * @param[in] cbfn - callback function
5416 * @param[in] cbarg - callback argument
5417 *
5418 * Return status.
5419 */
5420 bfa_status_t
5421 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5422 void *buf, u32 len, u32 offset,
5423 bfa_cb_phy_t cbfn, void *cbarg)
5424 {
5425 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5426 bfa_trc(phy, instance);
5427 bfa_trc(phy, len);
5428 bfa_trc(phy, offset);
5429
5430 if (!bfa_phy_present(phy))
5431 return BFA_STATUS_PHY_NOT_PRESENT;
5432
5433 if (!bfa_ioc_is_operational(phy->ioc))
5434 return BFA_STATUS_IOC_NON_OP;
5435
5436 /* 'len' must be on a word (4-byte) boundary */
5437 if (!len || (len & 0x03))
5438 return BFA_STATUS_FAILED;
5439
5440 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5441 bfa_trc(phy, phy->op_busy);
5442 return BFA_STATUS_DEVBUSY;
5443 }
5444
5445 phy->op_busy = 1;
5446 phy->cbfn = cbfn;
5447 phy->cbarg = cbarg;
5448 phy->instance = instance;
5449 phy->residue = len;
5450 phy->offset = 0;
5451 phy->addr_off = offset;
5452 phy->ubuf = buf;
5453
5454 bfa_phy_write_send(phy);
5455 return BFA_STATUS_OK;
5456 }
5457
5458 /*
5459 * Read phy image.
5460 *
5461 * @param[in] phy - phy structure
5462 * @param[in] instance - phy image instance
5463 * @param[in] buf - read data buffer
5464 * @param[in] len - data buffer length
5465 * @param[in] offset - offset relative to starting address
5466 * @param[in] cbfn - callback function
5467 * @param[in] cbarg - callback argument
5468 *
5469 * Return status.
5470 */
5471 bfa_status_t
5472 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5473 void *buf, u32 len, u32 offset,
5474 bfa_cb_phy_t cbfn, void *cbarg)
5475 {
5476 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5477 bfa_trc(phy, instance);
5478 bfa_trc(phy, len);
5479 bfa_trc(phy, offset);
5480
5481 if (!bfa_phy_present(phy))
5482 return BFA_STATUS_PHY_NOT_PRESENT;
5483
5484 if (!bfa_ioc_is_operational(phy->ioc))
5485 return BFA_STATUS_IOC_NON_OP;
5486
5487 /* 'len' must be on a word (4-byte) boundary */
5488 if (!len || (len & 0x03))
5489 return BFA_STATUS_FAILED;
5490
5491 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5492 bfa_trc(phy, phy->op_busy);
5493 return BFA_STATUS_DEVBUSY;
5494 }
5495
5496 phy->op_busy = 1;
5497 phy->cbfn = cbfn;
5498 phy->cbarg = cbarg;
5499 phy->instance = instance;
5500 phy->residue = len;
5501 phy->offset = 0;
5502 phy->addr_off = offset;
5503 phy->ubuf = buf;
5504 bfa_phy_read_send(phy);
5505
5506 return BFA_STATUS_OK;
5507 }
5508
5509 /*
5510 * Process phy response messages upon receiving interrupts.
5511 *
5512 * @param[in] phyarg - phy structure
5513 * @param[in] msg - message structure
5514 */
5515 void
5516 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5517 {
5518 struct bfa_phy_s *phy = phyarg;
5519 u32 status;
5520
5521 union {
5522 struct bfi_phy_query_rsp_s *query;
5523 struct bfi_phy_stats_rsp_s *stats;
5524 struct bfi_phy_write_rsp_s *write;
5525 struct bfi_phy_read_rsp_s *read;
5526 struct bfi_mbmsg_s *msg;
5527 } m;
5528
5529 m.msg = msg;
5530 bfa_trc(phy, msg->mh.msg_id);
5531
5532 if (!phy->op_busy) {
5533 /* receiving response after ioc failure */
5534 bfa_trc(phy, 0x9999);
5535 return;
5536 }
5537
5538 switch (msg->mh.msg_id) {
5539 case BFI_PHY_I2H_QUERY_RSP:
5540 status = be32_to_cpu(m.query->status);
5541 bfa_trc(phy, status);
5542
5543 if (status == BFA_STATUS_OK) {
5544 struct bfa_phy_attr_s *attr =
5545 (struct bfa_phy_attr_s *) phy->ubuf;
5546 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5547 sizeof(struct bfa_phy_attr_s));
5548 bfa_trc(phy, attr->status);
5549 bfa_trc(phy, attr->length);
5550 }
5551
5552 phy->status = status;
5553 phy->op_busy = 0;
5554 if (phy->cbfn)
5555 phy->cbfn(phy->cbarg, phy->status);
5556 break;
5557 case BFI_PHY_I2H_STATS_RSP:
5558 status = be32_to_cpu(m.stats->status);
5559 bfa_trc(phy, status);
5560
5561 if (status == BFA_STATUS_OK) {
5562 struct bfa_phy_stats_s *stats =
5563 (struct bfa_phy_stats_s *) phy->ubuf;
5564 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5565 sizeof(struct bfa_phy_stats_s));
5566 bfa_trc(phy, stats->status);
5567 }
5568
5569 phy->status = status;
5570 phy->op_busy = 0;
5571 if (phy->cbfn)
5572 phy->cbfn(phy->cbarg, phy->status);
5573 break;
5574 case BFI_PHY_I2H_WRITE_RSP:
5575 status = be32_to_cpu(m.write->status);
5576 bfa_trc(phy, status);
5577
5578 if (status != BFA_STATUS_OK || phy->residue == 0) {
5579 phy->status = status;
5580 phy->op_busy = 0;
5581 if (phy->cbfn)
5582 phy->cbfn(phy->cbarg, phy->status);
5583 } else {
5584 bfa_trc(phy, phy->offset);
5585 bfa_phy_write_send(phy);
5586 }
5587 break;
5588 case BFI_PHY_I2H_READ_RSP:
5589 status = be32_to_cpu(m.read->status);
5590 bfa_trc(phy, status);
5591
5592 if (status != BFA_STATUS_OK) {
5593 phy->status = status;
5594 phy->op_busy = 0;
5595 if (phy->cbfn)
5596 phy->cbfn(phy->cbarg, phy->status);
5597 } else {
5598 u32 len = be32_to_cpu(m.read->length);
5599 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5600 u16 *dbuf = (u16 *)phy->dbuf_kva;
5601 int i, sz = len >> 1;
5602
5603 bfa_trc(phy, phy->offset);
5604 bfa_trc(phy, len);
5605
5606 for (i = 0; i < sz; i++)
5607 buf[i] = be16_to_cpu(dbuf[i]);
5608
5609 phy->residue -= len;
5610 phy->offset += len;
5611
5612 if (phy->residue == 0) {
5613 phy->status = status;
5614 phy->op_busy = 0;
5615 if (phy->cbfn)
5616 phy->cbfn(phy->cbarg, phy->status);
5617 } else
5618 bfa_phy_read_send(phy);
5619 }
5620 break;
5621 default:
5622 WARN_ON(1);
5623 }
5624 }
5625
5626 /*
5627 * DCONF module specific
5628 */
5629
5630 BFA_MODULE(dconf);
5631
5632 /*
5633 * DCONF state machine events
5634 */
5635 enum bfa_dconf_event {
5636 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5637 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5638 BFA_DCONF_SM_WR = 3, /* binding change, map */
5639 BFA_DCONF_SM_TIMEOUT = 4, /* update timer expired */
5640 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5641 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5642 };
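
/*
 * Transition summary for the handlers below (flash writes and timer
 * starts elided):
 *
 *	uninit --INIT--> flash_read --FLASH_COMP/TIMEOUT--> ready
 *	ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *	dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 *	dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 */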
5643
5644 /* forward declaration of DCONF state machine */
5645 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5646 enum bfa_dconf_event event);
5647 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5648 enum bfa_dconf_event event);
5649 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5650 enum bfa_dconf_event event);
5651 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5652 enum bfa_dconf_event event);
5653 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5654 enum bfa_dconf_event event);
5655 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5656 enum bfa_dconf_event event);
5657 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5658 enum bfa_dconf_event event);
5659
5660 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5661 static void bfa_dconf_timer(void *cbarg);
5662 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5663 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5664
5665 /*
5666 * Beginning state of the dconf module. Waiting for an event to start.
5667 */
5668 static void
5669 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5670 {
5671 bfa_status_t bfa_status;
5672 bfa_trc(dconf->bfa, event);
5673
5674 switch (event) {
5675 case BFA_DCONF_SM_INIT:
5676 if (dconf->min_cfg) {
5677 bfa_trc(dconf->bfa, dconf->min_cfg);
5678 return;
5679 }
5680 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5681 dconf->flashdone = BFA_FALSE;
5682 bfa_trc(dconf->bfa, dconf->flashdone);
5683 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5684 BFA_FLASH_PART_DRV, dconf->instance,
5685 dconf->dconf,
5686 sizeof(struct bfa_dconf_s), 0,
5687 bfa_dconf_init_cb, dconf->bfa);
5688 if (bfa_status != BFA_STATUS_OK) {
5689 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5690 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5691 return;
5692 }
5693 break;
5694 case BFA_DCONF_SM_EXIT:
5695 dconf->flashdone = BFA_TRUE;
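/* fall through */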
5696 case BFA_DCONF_SM_IOCDISABLE:
5697 case BFA_DCONF_SM_WR:
5698 case BFA_DCONF_SM_FLASH_COMP:
5699 break;
5700 default:
5701 bfa_sm_fault(dconf->bfa, event);
5702 }
5703 }
5704
5705 /*
5706 * Read flash for dconf entries and make a call back to the driver once done.
5707 */
5708 static void
5709 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5710 enum bfa_dconf_event event)
5711 {
5712 bfa_trc(dconf->bfa, event);
5713
5714 switch (event) {
5715 case BFA_DCONF_SM_FLASH_COMP:
5716 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5717 break;
5718 case BFA_DCONF_SM_TIMEOUT:
5719 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5720 break;
5721 case BFA_DCONF_SM_EXIT:
5722 dconf->flashdone = BFA_TRUE;
5723 bfa_trc(dconf->bfa, dconf->flashdone);
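/* fall through */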
5724 case BFA_DCONF_SM_IOCDISABLE:
5725 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5726 break;
5727 default:
5728 bfa_sm_fault(dconf->bfa, event);
5729 }
5730 }
5731
5732 /*
5733 * DCONF Module is in ready state. Has completed the initialization.
5734 */
5735 static void
5736 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5737 {
5738 bfa_trc(dconf->bfa, event);
5739
5740 switch (event) {
5741 case BFA_DCONF_SM_WR:
5742 bfa_timer_start(dconf->bfa, &dconf->timer,
5743 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5744 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5745 break;
5746 case BFA_DCONF_SM_EXIT:
5747 dconf->flashdone = BFA_TRUE;
5748 bfa_trc(dconf->bfa, dconf->flashdone);
5749 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5750 break;
5751 case BFA_DCONF_SM_INIT:
5752 case BFA_DCONF_SM_IOCDISABLE:
5753 break;
5754 default:
5755 bfa_sm_fault(dconf->bfa, event);
5756 }
5757 }
5758
5759 /*
5760 * Entries are dirty; write them back to the flash.
5761 */
5762
5763 static void
5764 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5765 {
5766 bfa_trc(dconf->bfa, event);
5767
5768 switch (event) {
5769 case BFA_DCONF_SM_TIMEOUT:
5770 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5771 bfa_dconf_flash_write(dconf);
5772 break;
5773 case BFA_DCONF_SM_WR:
5774 bfa_timer_stop(&dconf->timer);
5775 bfa_timer_start(dconf->bfa, &dconf->timer,
5776 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5777 break;
5778 case BFA_DCONF_SM_EXIT:
5779 bfa_timer_stop(&dconf->timer);
5780 bfa_timer_start(dconf->bfa, &dconf->timer,
5781 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5782 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5783 bfa_dconf_flash_write(dconf);
5784 break;
5785 case BFA_DCONF_SM_FLASH_COMP:
5786 break;
5787 case BFA_DCONF_SM_IOCDISABLE:
5788 bfa_timer_stop(&dconf->timer);
5789 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5790 break;
5791 default:
5792 bfa_sm_fault(dconf->bfa, event);
5793 }
5794 }
5795
5796 /*
5797 * Sync the dconf entries to the flash.
5798 */
5799 static void
5800 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5801 enum bfa_dconf_event event)
5802 {
5803 bfa_trc(dconf->bfa, event);
5804
5805 switch (event) {
5806 case BFA_DCONF_SM_IOCDISABLE:
5807 case BFA_DCONF_SM_FLASH_COMP:
5808 bfa_timer_stop(&dconf->timer);
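/* fall through */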
5809 case BFA_DCONF_SM_TIMEOUT:
5810 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5811 dconf->flashdone = BFA_TRUE;
5812 bfa_trc(dconf->bfa, dconf->flashdone);
5813 bfa_ioc_disable(&dconf->bfa->ioc);
5814 break;
5815 default:
5816 bfa_sm_fault(dconf->bfa, event);
5817 }
5818 }
5819
5820 static void
5821 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5822 {
5823 bfa_trc(dconf->bfa, event);
5824
5825 switch (event) {
5826 case BFA_DCONF_SM_FLASH_COMP:
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5828 break;
5829 case BFA_DCONF_SM_WR:
5830 bfa_timer_start(dconf->bfa, &dconf->timer,
5831 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5832 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5833 break;
5834 case BFA_DCONF_SM_EXIT:
5835 bfa_timer_start(dconf->bfa, &dconf->timer,
5836 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5837 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5838 break;
5839 case BFA_DCONF_SM_IOCDISABLE:
5840 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5841 break;
5842 default:
5843 bfa_sm_fault(dconf->bfa, event);
5844 }
5845 }
5846
5847 static void
5848 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5849 enum bfa_dconf_event event)
5850 {
5851 bfa_trc(dconf->bfa, event);
5852
5853 switch (event) {
5854 case BFA_DCONF_SM_INIT:
5855 bfa_timer_start(dconf->bfa, &dconf->timer,
5856 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5857 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5858 break;
5859 case BFA_DCONF_SM_EXIT:
5860 dconf->flashdone = BFA_TRUE;
5861 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5862 break;
5863 case BFA_DCONF_SM_IOCDISABLE:
5864 break;
5865 default:
5866 bfa_sm_fault(dconf->bfa, event);
5867 }
5868 }
5869
5870 /*
5871 * Compute and set up the memory needed by the DRV_CFG module.
5872 */
5873 static void
5874 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5875 struct bfa_s *bfa)
5876 {
5877 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5878
5879 if (cfg->drvcfg.min_cfg)
5880 bfa_mem_kva_setup(meminfo, dconf_kva,
5881 sizeof(struct bfa_dconf_hdr_s));
5882 else
5883 bfa_mem_kva_setup(meminfo, dconf_kva,
5884 sizeof(struct bfa_dconf_s));
5885 }
5886
5887 static void
5888 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5889 struct bfa_pcidev_s *pcidev)
5890 {
5891 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5892
5893 dconf->bfad = bfad;
5894 dconf->bfa = bfa;
5895 dconf->instance = bfa->ioc.port_id;
5896 bfa_trc(bfa, dconf->instance);
5897
5898 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5899 if (cfg->drvcfg.min_cfg) {
5900 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5901 dconf->min_cfg = BFA_TRUE;
5902 /*
5903 * Set the flashdone flag to TRUE explicitly as no flash
5904 * write will happen in min_cfg mode.
5905 */
5906 dconf->flashdone = BFA_TRUE;
5907 } else {
5908 dconf->min_cfg = BFA_FALSE;
5909 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5910 }
5911
5912 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5913 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5914 }
5915
5916 static void
5917 bfa_dconf_init_cb(void *arg, bfa_status_t status)
5918 {
5919 struct bfa_s *bfa = arg;
5920 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5921
5922 dconf->flashdone = BFA_TRUE;
5923 bfa_trc(bfa, dconf->flashdone);
5924 bfa_iocfc_cb_dconf_modinit(bfa, status);
5925 if (status == BFA_STATUS_OK) {
5926 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5927 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5928 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5929 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5930 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5931 }
5932 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5933 }
5934
5935 void
5936 bfa_dconf_modinit(struct bfa_s *bfa)
5937 {
5938 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5939 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5940 }
5941 static void
5942 bfa_dconf_start(struct bfa_s *bfa)
5943 {
5944 }
5945
5946 static void
5947 bfa_dconf_stop(struct bfa_s *bfa)
5948 {
5949 }
5950
5951 static void bfa_dconf_timer(void *cbarg)
5952 {
5953 struct bfa_dconf_mod_s *dconf = cbarg;
5954 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5955 }
5956 static void
5957 bfa_dconf_iocdisable(struct bfa_s *bfa)
5958 {
5959 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5960 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5961 }
5962
5963 static void
5964 bfa_dconf_detach(struct bfa_s *bfa)
5965 {
5966 }
5967
5968 static bfa_status_t
5969 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5970 {
5971 bfa_status_t bfa_status;
5972 bfa_trc(dconf->bfa, 0);
5973
5974 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5975 BFA_FLASH_PART_DRV, dconf->instance,
5976 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5977 bfa_dconf_cbfn, dconf);
5978 WARN_ON(bfa_status != BFA_STATUS_OK);
5980 bfa_trc(dconf->bfa, bfa_status);
5981
5982 return bfa_status;
5983 }
5984
5985 bfa_status_t
5986 bfa_dconf_update(struct bfa_s *bfa)
5987 {
5988 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5989 bfa_trc(dconf->bfa, 0);
5990 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5991 return BFA_STATUS_FAILED;
5992
5993 if (dconf->min_cfg) {
5994 bfa_trc(dconf->bfa, dconf->min_cfg);
5995 return BFA_STATUS_FAILED;
5996 }
5997
5998 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5999 return BFA_STATUS_OK;
6000 }
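
/*
 * Usage sketch: callers dirty the in-memory copy and let the
 * BFA_DCONF_UPDATE_TOV timer coalesce bursts of updates into a single
 * flash write:
 *
 *	... modify BFA_DCONF_MOD(bfa)->dconf contents ...
 *	if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
 *		... IOC is down or min_cfg mode; not persisted ...
 */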
6001
6002 static void
6003 bfa_dconf_cbfn(void *arg, bfa_status_t status)
6004 {
6005 struct bfa_dconf_mod_s *dconf = arg;
6006 WARN_ON(status);
6007 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6008 }
6009
6010 void
6011 bfa_dconf_modexit(struct bfa_s *bfa)
6012 {
6013 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6014 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
6015 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
6016 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6017 }