/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
19 #include <cs/bfa_debug.h>
20 #include <bfa_cb_ioim_macros.h>
22 BFA_TRC_FILE(HAL
, IOIM
);
25 * forward declarations.
27 static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
);
28 static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
);
29 static void bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
);
30 static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
);
31 static void bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
);
32 static void __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
);
33 static void __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
);
34 static void __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
);
35 static void __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
);
36 static void __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
);
/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/*  io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/*  io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/*  io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/*  io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/*  io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/*  io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/*  abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/*  abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/*  abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/*  CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/*  SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/*  sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/*  bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/*  IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/*  IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/*  ITN offline TOV */
};
/*
 * forward declaration of IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
93 * IO is not started (unallocated).
96 bfa_ioim_sm_uninit(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
98 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
99 bfa_trc_fp(ioim
->bfa
, event
);
102 case BFA_IOIM_SM_START
:
103 if (!bfa_itnim_is_online(ioim
->itnim
)) {
104 if (!bfa_itnim_hold_io(ioim
->itnim
)) {
105 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
107 list_add_tail(&ioim
->qe
,
108 &ioim
->fcpim
->ioim_comp_q
);
109 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
110 __bfa_cb_ioim_pathtov
, ioim
);
113 list_add_tail(&ioim
->qe
,
114 &ioim
->itnim
->pending_q
);
119 if (ioim
->nsges
> BFI_SGE_INLINE
) {
120 if (!bfa_ioim_sge_setup(ioim
)) {
121 bfa_sm_set_state(ioim
, bfa_ioim_sm_sgalloc
);
126 if (!bfa_ioim_send_ioreq(ioim
)) {
127 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
131 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
134 case BFA_IOIM_SM_IOTOV
:
135 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
136 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
137 __bfa_cb_ioim_pathtov
, ioim
);
140 case BFA_IOIM_SM_ABORT
:
142 * IO in pending queue can get abort requests. Complete abort
143 * requests immediately.
145 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
146 bfa_assert(bfa_q_is_on_q(&ioim
->itnim
->pending_q
, ioim
));
147 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
152 bfa_sm_fault(ioim
->bfa
, event
);
157 * IO is waiting for SG pages.
160 bfa_ioim_sm_sgalloc(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
162 bfa_trc(ioim
->bfa
, ioim
->iotag
);
163 bfa_trc(ioim
->bfa
, event
);
166 case BFA_IOIM_SM_SGALLOCED
:
167 if (!bfa_ioim_send_ioreq(ioim
)) {
168 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
171 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
174 case BFA_IOIM_SM_CLEANUP
:
175 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
176 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
177 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
179 bfa_ioim_notify_cleanup(ioim
);
182 case BFA_IOIM_SM_ABORT
:
183 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
184 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
185 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
189 case BFA_IOIM_SM_HWFAIL
:
190 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
191 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
192 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
197 bfa_sm_fault(ioim
->bfa
, event
);
205 bfa_ioim_sm_active(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
207 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
208 bfa_trc_fp(ioim
->bfa
, event
);
211 case BFA_IOIM_SM_COMP_GOOD
:
212 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
213 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
214 __bfa_cb_ioim_good_comp
, ioim
);
217 case BFA_IOIM_SM_COMP
:
218 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
219 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
223 case BFA_IOIM_SM_DONE
:
224 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
225 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
229 case BFA_IOIM_SM_ABORT
:
230 ioim
->iosp
->abort_explicit
= BFA_TRUE
;
231 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
233 if (bfa_ioim_send_abort(ioim
))
234 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
236 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort_qfull
);
237 bfa_reqq_wait(ioim
->bfa
, ioim
->itnim
->reqq
,
238 &ioim
->iosp
->reqq_wait
);
242 case BFA_IOIM_SM_CLEANUP
:
243 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
244 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
246 if (bfa_ioim_send_abort(ioim
))
247 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
249 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
250 bfa_reqq_wait(ioim
->bfa
, ioim
->itnim
->reqq
,
251 &ioim
->iosp
->reqq_wait
);
255 case BFA_IOIM_SM_HWFAIL
:
256 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
257 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
262 bfa_sm_fault(ioim
->bfa
, event
);
267 * IO is being aborted, waiting for completion from firmware.
270 bfa_ioim_sm_abort(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
272 bfa_trc(ioim
->bfa
, ioim
->iotag
);
273 bfa_trc(ioim
->bfa
, event
);
276 case BFA_IOIM_SM_COMP_GOOD
:
277 case BFA_IOIM_SM_COMP
:
278 case BFA_IOIM_SM_DONE
:
279 case BFA_IOIM_SM_FREE
:
282 case BFA_IOIM_SM_ABORT_DONE
:
283 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
284 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
288 case BFA_IOIM_SM_ABORT_COMP
:
289 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
290 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
294 case BFA_IOIM_SM_COMP_UTAG
:
295 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
296 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
300 case BFA_IOIM_SM_CLEANUP
:
301 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
302 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
304 if (bfa_ioim_send_abort(ioim
))
305 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
307 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
308 bfa_reqq_wait(ioim
->bfa
, ioim
->itnim
->reqq
,
309 &ioim
->iosp
->reqq_wait
);
313 case BFA_IOIM_SM_HWFAIL
:
314 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
315 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
320 bfa_sm_fault(ioim
->bfa
, event
);
325 * IO is being cleaned up (implicit abort), waiting for completion from
329 bfa_ioim_sm_cleanup(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
331 bfa_trc(ioim
->bfa
, ioim
->iotag
);
332 bfa_trc(ioim
->bfa
, event
);
335 case BFA_IOIM_SM_COMP_GOOD
:
336 case BFA_IOIM_SM_COMP
:
337 case BFA_IOIM_SM_DONE
:
338 case BFA_IOIM_SM_FREE
:
341 case BFA_IOIM_SM_ABORT
:
343 * IO is already being aborted implicitly
345 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
348 case BFA_IOIM_SM_ABORT_DONE
:
349 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
350 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
351 bfa_ioim_notify_cleanup(ioim
);
354 case BFA_IOIM_SM_ABORT_COMP
:
355 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
356 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
357 bfa_ioim_notify_cleanup(ioim
);
360 case BFA_IOIM_SM_COMP_UTAG
:
361 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
362 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
363 bfa_ioim_notify_cleanup(ioim
);
366 case BFA_IOIM_SM_HWFAIL
:
367 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
368 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
372 case BFA_IOIM_SM_CLEANUP
:
374 * IO can be in cleanup state already due to TM command. 2nd cleanup
375 * request comes from ITN offline event.
380 bfa_sm_fault(ioim
->bfa
, event
);
385 * IO is waiting for room in request CQ
388 bfa_ioim_sm_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
390 bfa_trc(ioim
->bfa
, ioim
->iotag
);
391 bfa_trc(ioim
->bfa
, event
);
394 case BFA_IOIM_SM_QRESUME
:
395 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
396 bfa_ioim_send_ioreq(ioim
);
399 case BFA_IOIM_SM_ABORT
:
400 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
401 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
402 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
406 case BFA_IOIM_SM_CLEANUP
:
407 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
408 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
409 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
411 bfa_ioim_notify_cleanup(ioim
);
414 case BFA_IOIM_SM_HWFAIL
:
415 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
416 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
417 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
422 bfa_sm_fault(ioim
->bfa
, event
);
427 * Active IO is being aborted, waiting for room in request CQ.
430 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
432 bfa_trc(ioim
->bfa
, ioim
->iotag
);
433 bfa_trc(ioim
->bfa
, event
);
436 case BFA_IOIM_SM_QRESUME
:
437 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
438 bfa_ioim_send_abort(ioim
);
441 case BFA_IOIM_SM_CLEANUP
:
442 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
443 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
444 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
447 case BFA_IOIM_SM_COMP_GOOD
:
448 case BFA_IOIM_SM_COMP
:
449 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
450 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
451 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
455 case BFA_IOIM_SM_DONE
:
456 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
457 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
458 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
462 case BFA_IOIM_SM_HWFAIL
:
463 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
464 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
465 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
470 bfa_sm_fault(ioim
->bfa
, event
);
475 * Active IO is being cleaned up, waiting for room in request CQ.
478 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
480 bfa_trc(ioim
->bfa
, ioim
->iotag
);
481 bfa_trc(ioim
->bfa
, event
);
484 case BFA_IOIM_SM_QRESUME
:
485 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
486 bfa_ioim_send_abort(ioim
);
489 case BFA_IOIM_SM_ABORT
:
491 * IO is alraedy being cleaned up implicitly
493 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
496 case BFA_IOIM_SM_COMP_GOOD
:
497 case BFA_IOIM_SM_COMP
:
498 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
499 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
500 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
501 bfa_ioim_notify_cleanup(ioim
);
504 case BFA_IOIM_SM_DONE
:
505 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
506 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
507 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
508 bfa_ioim_notify_cleanup(ioim
);
511 case BFA_IOIM_SM_HWFAIL
:
512 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
513 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
514 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
519 bfa_sm_fault(ioim
->bfa
, event
);
524 * IO bfa callback is pending.
527 bfa_ioim_sm_hcb(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
529 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
530 bfa_trc_fp(ioim
->bfa
, event
);
533 case BFA_IOIM_SM_HCB
:
534 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
536 bfa_cb_ioim_resfree(ioim
->bfa
->bfad
);
539 case BFA_IOIM_SM_CLEANUP
:
540 bfa_ioim_notify_cleanup(ioim
);
543 case BFA_IOIM_SM_HWFAIL
:
547 bfa_sm_fault(ioim
->bfa
, event
);
552 * IO bfa callback is pending. IO resource cannot be freed.
555 bfa_ioim_sm_hcb_free(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
557 bfa_trc(ioim
->bfa
, ioim
->iotag
);
558 bfa_trc(ioim
->bfa
, event
);
561 case BFA_IOIM_SM_HCB
:
562 bfa_sm_set_state(ioim
, bfa_ioim_sm_resfree
);
564 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_resfree_q
);
567 case BFA_IOIM_SM_FREE
:
568 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
571 case BFA_IOIM_SM_CLEANUP
:
572 bfa_ioim_notify_cleanup(ioim
);
575 case BFA_IOIM_SM_HWFAIL
:
576 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
580 bfa_sm_fault(ioim
->bfa
, event
);
585 * IO is completed, waiting resource free from firmware.
588 bfa_ioim_sm_resfree(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
590 bfa_trc(ioim
->bfa
, ioim
->iotag
);
591 bfa_trc(ioim
->bfa
, event
);
594 case BFA_IOIM_SM_FREE
:
595 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
597 bfa_cb_ioim_resfree(ioim
->bfa
->bfad
);
600 case BFA_IOIM_SM_CLEANUP
:
601 bfa_ioim_notify_cleanup(ioim
);
604 case BFA_IOIM_SM_HWFAIL
:
608 bfa_sm_fault(ioim
->bfa
, event
);
619 __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
)
621 struct bfa_ioim_s
*ioim
= cbarg
;
624 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
628 bfa_cb_ioim_good_comp(ioim
->bfa
->bfad
, ioim
->dio
);
632 __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
)
634 struct bfa_ioim_s
*ioim
= cbarg
;
635 struct bfi_ioim_rsp_s
*m
;
641 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
645 m
= (struct bfi_ioim_rsp_s
*) &ioim
->iosp
->comp_rspmsg
;
646 if (m
->io_status
== BFI_IOIM_STS_OK
) {
648 * setup sense information, if present
650 if (m
->scsi_status
== SCSI_STATUS_CHECK_CONDITION
652 sns_len
= m
->sns_len
;
653 snsinfo
= ioim
->iosp
->snsinfo
;
657 * setup residue value correctly for normal completions
659 if (m
->resid_flags
== FCP_RESID_UNDER
)
660 residue
= bfa_os_ntohl(m
->residue
);
661 if (m
->resid_flags
== FCP_RESID_OVER
) {
662 residue
= bfa_os_ntohl(m
->residue
);
667 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, m
->io_status
,
668 m
->scsi_status
, sns_len
, snsinfo
, residue
);
672 __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
)
674 struct bfa_ioim_s
*ioim
= cbarg
;
677 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
681 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_ABORTED
,
686 __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
)
688 struct bfa_ioim_s
*ioim
= cbarg
;
691 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
695 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_PATHTOV
,
700 __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
)
702 struct bfa_ioim_s
*ioim
= cbarg
;
705 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
709 bfa_cb_ioim_abort(ioim
->bfa
->bfad
, ioim
->dio
);
713 bfa_ioim_sgpg_alloced(void *cbarg
)
715 struct bfa_ioim_s
*ioim
= cbarg
;
717 ioim
->nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
718 list_splice_tail_init(&ioim
->iosp
->sgpg_wqe
.sgpg_q
, &ioim
->sgpg_q
);
719 bfa_ioim_sgpg_setup(ioim
);
720 bfa_sm_send_event(ioim
, BFA_IOIM_SM_SGALLOCED
);
724 * Send I/O request to firmware.
727 bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
)
729 struct bfa_itnim_s
*itnim
= ioim
->itnim
;
730 struct bfi_ioim_req_s
*m
;
731 static struct fcp_cmnd_s cmnd_z0
= { 0 };
732 struct bfi_sge_s
*sge
;
735 struct scatterlist
*sg
;
736 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
739 * check for room in queue to send request now
741 m
= bfa_reqq_next(ioim
->bfa
, itnim
->reqq
);
743 bfa_reqq_wait(ioim
->bfa
, ioim
->itnim
->reqq
,
744 &ioim
->iosp
->reqq_wait
);
749 * build i/o request message next
751 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
752 m
->rport_hdl
= ioim
->itnim
->rport
->fw_handle
;
753 m
->io_timeout
= bfa_cb_ioim_get_timeout(ioim
->dio
);
756 * build inline IO SG element here
760 sg
= (struct scatterlist
*)scsi_sglist(cmnd
);
761 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
762 sge
->sga
= *(union bfi_addr_u
*) &addr
;
763 pgdlen
= sg_dma_len(sg
);
764 sge
->sg_len
= pgdlen
;
765 sge
->flags
= (ioim
->nsges
> BFI_SGE_INLINE
) ?
766 BFI_SGE_DATA_CPL
: BFI_SGE_DATA_LAST
;
771 if (ioim
->nsges
> BFI_SGE_INLINE
) {
772 sge
->sga
= ioim
->sgpg
->sgpg_pa
;
774 sge
->sga
.a32
.addr_lo
= 0;
775 sge
->sga
.a32
.addr_hi
= 0;
777 sge
->sg_len
= pgdlen
;
778 sge
->flags
= BFI_SGE_PGDLEN
;
782 * set up I/O command parameters
784 bfa_os_assign(m
->cmnd
, cmnd_z0
);
785 m
->cmnd
.lun
= bfa_cb_ioim_get_lun(ioim
->dio
);
786 m
->cmnd
.iodir
= bfa_cb_ioim_get_iodir(ioim
->dio
);
787 bfa_os_assign(m
->cmnd
.cdb
,
788 *(struct scsi_cdb_s
*)bfa_cb_ioim_get_cdb(ioim
->dio
));
789 m
->cmnd
.fcp_dl
= bfa_os_htonl(bfa_cb_ioim_get_size(ioim
->dio
));
792 * set up I/O message header
794 switch (m
->cmnd
.iodir
) {
796 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_READ
, 0, bfa_lpuid(ioim
->bfa
));
797 bfa_stats(itnim
, input_reqs
);
799 case FCP_IODIR_WRITE
:
800 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_WRITE
, 0, bfa_lpuid(ioim
->bfa
));
801 bfa_stats(itnim
, output_reqs
);
804 bfa_stats(itnim
, input_reqs
);
805 bfa_stats(itnim
, output_reqs
);
807 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
809 if (itnim
->seq_rec
||
810 (bfa_cb_ioim_get_size(ioim
->dio
) & (sizeof(u32
) - 1)))
811 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
814 m
->cmnd
.crn
= bfa_cb_ioim_get_crn(ioim
->dio
);
815 m
->cmnd
.priority
= bfa_cb_ioim_get_priority(ioim
->dio
);
816 m
->cmnd
.taskattr
= bfa_cb_ioim_get_taskattr(ioim
->dio
);
819 * Handle large CDB (>16 bytes).
821 m
->cmnd
.addl_cdb_len
= (bfa_cb_ioim_get_cdblen(ioim
->dio
) -
822 FCP_CMND_CDB_LEN
) / sizeof(u32
);
823 if (m
->cmnd
.addl_cdb_len
) {
824 bfa_os_memcpy(&m
->cmnd
.cdb
+ 1, (struct scsi_cdb_s
*)
825 bfa_cb_ioim_get_cdb(ioim
->dio
) + 1,
826 m
->cmnd
.addl_cdb_len
* sizeof(u32
));
827 fcp_cmnd_fcpdl(&m
->cmnd
) =
828 bfa_os_htonl(bfa_cb_ioim_get_size(ioim
->dio
));
833 * queue I/O message to firmware
835 bfa_reqq_produce(ioim
->bfa
, itnim
->reqq
);
840 * Setup any additional SG pages needed.Inline SG element is setup
844 bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
)
848 bfa_assert(ioim
->nsges
> BFI_SGE_INLINE
);
851 * allocate SG pages needed
853 nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
857 if (bfa_sgpg_malloc(ioim
->bfa
, &ioim
->sgpg_q
, nsgpgs
)
859 bfa_sgpg_wait(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
, nsgpgs
);
863 ioim
->nsgpgs
= nsgpgs
;
864 bfa_ioim_sgpg_setup(ioim
);
870 bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
)
873 struct bfi_sge_s
*sge
;
874 struct bfa_sgpg_s
*sgpg
;
877 struct scatterlist
*sg
;
878 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
880 sgeid
= BFI_SGE_INLINE
;
881 ioim
->sgpg
= sgpg
= bfa_q_first(&ioim
->sgpg_q
);
883 sg
= scsi_sglist(cmnd
);
887 sge
= sgpg
->sgpg
->sges
;
888 nsges
= ioim
->nsges
- sgeid
;
889 if (nsges
> BFI_SGPG_DATA_SGES
)
890 nsges
= BFI_SGPG_DATA_SGES
;
893 for (i
= 0; i
< nsges
; i
++, sge
++, sgeid
++, sg
= sg_next(sg
)) {
894 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
895 sge
->sga
= *(union bfi_addr_u
*) &addr
;
896 sge
->sg_len
= sg_dma_len(sg
);
897 pgcumsz
+= sge
->sg_len
;
903 sge
->flags
= BFI_SGE_DATA
;
904 else if (sgeid
< (ioim
->nsges
- 1))
905 sge
->flags
= BFI_SGE_DATA_CPL
;
907 sge
->flags
= BFI_SGE_DATA_LAST
;
910 sgpg
= (struct bfa_sgpg_s
*) bfa_q_next(sgpg
);
913 * set the link element of each page
915 if (sgeid
== ioim
->nsges
) {
916 sge
->flags
= BFI_SGE_PGDLEN
;
917 sge
->sga
.a32
.addr_lo
= 0;
918 sge
->sga
.a32
.addr_hi
= 0;
920 sge
->flags
= BFI_SGE_LINK
;
921 sge
->sga
= sgpg
->sgpg_pa
;
923 sge
->sg_len
= pgcumsz
;
924 } while (sgeid
< ioim
->nsges
);
928 * Send I/O abort request to firmware.
931 bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
)
933 struct bfa_itnim_s
*itnim
= ioim
->itnim
;
934 struct bfi_ioim_abort_req_s
*m
;
935 enum bfi_ioim_h2i msgop
;
938 * check for room in queue to send request now
940 m
= bfa_reqq_next(ioim
->bfa
, itnim
->reqq
);
945 * build i/o request message next
947 if (ioim
->iosp
->abort_explicit
)
948 msgop
= BFI_IOIM_H2I_IOABORT_REQ
;
950 msgop
= BFI_IOIM_H2I_IOCLEANUP_REQ
;
952 bfi_h2i_set(m
->mh
, BFI_MC_IOIM
, msgop
, bfa_lpuid(ioim
->bfa
));
953 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
954 m
->abort_tag
= ++ioim
->abort_tag
;
957 * queue I/O message to firmware
959 bfa_reqq_produce(ioim
->bfa
, itnim
->reqq
);
964 * Call to resume any I/O requests waiting for room in request queue.
967 bfa_ioim_qresume(void *cbarg
)
969 struct bfa_ioim_s
*ioim
= cbarg
;
971 bfa_fcpim_stats(ioim
->fcpim
, qresumes
);
972 bfa_sm_send_event(ioim
, BFA_IOIM_SM_QRESUME
);
977 bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
)
980 * Move IO from itnim queue to fcpim global queue since itnim will be
984 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
986 if (!ioim
->iosp
->tskim
) {
987 if (ioim
->fcpim
->delay_comp
&& ioim
->itnim
->iotov_active
) {
988 bfa_cb_dequeue(&ioim
->hcb_qe
);
990 list_add_tail(&ioim
->qe
, &ioim
->itnim
->delay_comp_q
);
992 bfa_itnim_iodone(ioim
->itnim
);
994 bfa_tskim_iodone(ioim
->iosp
->tskim
);
998 * or after the link comes back.
1001 bfa_ioim_delayed_comp(struct bfa_ioim_s
*ioim
, bfa_boolean_t iotov
)
1004 * If path tov timer expired, failback with PATHTOV status - these
1005 * IO requests are not normally retried by IO stack.
1007 * Otherwise device cameback online and fail it with normal failed
1008 * status so that IO stack retries these failed IO requests.
1011 ioim
->io_cbfn
= __bfa_cb_ioim_pathtov
;
1013 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
1015 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
1018 * Move IO to fcpim global queue since itnim will be
1021 list_del(&ioim
->qe
);
1022 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
1032 * Memory allocation and initialization.
1035 bfa_ioim_attach(struct bfa_fcpim_mod_s
*fcpim
, struct bfa_meminfo_s
*minfo
)
1037 struct bfa_ioim_s
*ioim
;
1038 struct bfa_ioim_sp_s
*iosp
;
1044 * claim memory first
1046 ioim
= (struct bfa_ioim_s
*) bfa_meminfo_kva(minfo
);
1047 fcpim
->ioim_arr
= ioim
;
1048 bfa_meminfo_kva(minfo
) = (u8
*) (ioim
+ fcpim
->num_ioim_reqs
);
1050 iosp
= (struct bfa_ioim_sp_s
*) bfa_meminfo_kva(minfo
);
1051 fcpim
->ioim_sp_arr
= iosp
;
1052 bfa_meminfo_kva(minfo
) = (u8
*) (iosp
+ fcpim
->num_ioim_reqs
);
1055 * Claim DMA memory for per IO sense data.
1057 snsbufsz
= fcpim
->num_ioim_reqs
* BFI_IOIM_SNSLEN
;
1058 fcpim
->snsbase
.pa
= bfa_meminfo_dma_phys(minfo
);
1059 bfa_meminfo_dma_phys(minfo
) += snsbufsz
;
1061 fcpim
->snsbase
.kva
= bfa_meminfo_dma_virt(minfo
);
1062 bfa_meminfo_dma_virt(minfo
) += snsbufsz
;
1063 snsinfo
= fcpim
->snsbase
.kva
;
1064 bfa_iocfc_set_snsbase(fcpim
->bfa
, fcpim
->snsbase
.pa
);
1067 * Initialize ioim free queues
1069 INIT_LIST_HEAD(&fcpim
->ioim_free_q
);
1070 INIT_LIST_HEAD(&fcpim
->ioim_resfree_q
);
1071 INIT_LIST_HEAD(&fcpim
->ioim_comp_q
);
1073 for (i
= 0; i
< fcpim
->num_ioim_reqs
;
1074 i
++, ioim
++, iosp
++, snsinfo
+= BFI_IOIM_SNSLEN
) {
1078 bfa_os_memset(ioim
, 0, sizeof(struct bfa_ioim_s
));
1080 ioim
->bfa
= fcpim
->bfa
;
1081 ioim
->fcpim
= fcpim
;
1083 iosp
->snsinfo
= snsinfo
;
1084 INIT_LIST_HEAD(&ioim
->sgpg_q
);
1085 bfa_reqq_winit(&ioim
->iosp
->reqq_wait
,
1086 bfa_ioim_qresume
, ioim
);
1087 bfa_sgpg_winit(&ioim
->iosp
->sgpg_wqe
,
1088 bfa_ioim_sgpg_alloced
, ioim
);
1089 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
1091 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
/**
 * Driver detach time call. No per-IO teardown is required; memory was
 * claimed from bfa_meminfo and is released by the owner.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
1104 bfa_ioim_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1106 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1107 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
1108 struct bfa_ioim_s
*ioim
;
1110 enum bfa_ioim_event evt
= BFA_IOIM_SM_COMP
;
1112 iotag
= bfa_os_ntohs(rsp
->io_tag
);
1114 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
1115 bfa_assert(ioim
->iotag
== iotag
);
1117 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1118 bfa_trc(ioim
->bfa
, rsp
->io_status
);
1119 bfa_trc(ioim
->bfa
, rsp
->reuse_io_tag
);
1121 if (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_active
))
1122 bfa_os_assign(ioim
->iosp
->comp_rspmsg
, *m
);
1124 switch (rsp
->io_status
) {
1125 case BFI_IOIM_STS_OK
:
1126 bfa_fcpim_stats(fcpim
, iocomp_ok
);
1127 if (rsp
->reuse_io_tag
== 0)
1128 evt
= BFA_IOIM_SM_DONE
;
1130 evt
= BFA_IOIM_SM_COMP
;
1133 case BFI_IOIM_STS_TIMEDOUT
:
1134 case BFI_IOIM_STS_ABORTED
:
1135 rsp
->io_status
= BFI_IOIM_STS_ABORTED
;
1136 bfa_fcpim_stats(fcpim
, iocomp_aborted
);
1137 if (rsp
->reuse_io_tag
== 0)
1138 evt
= BFA_IOIM_SM_DONE
;
1140 evt
= BFA_IOIM_SM_COMP
;
1143 case BFI_IOIM_STS_PROTO_ERR
:
1144 bfa_fcpim_stats(fcpim
, iocom_proto_err
);
1145 bfa_assert(rsp
->reuse_io_tag
);
1146 evt
= BFA_IOIM_SM_COMP
;
1149 case BFI_IOIM_STS_SQER_NEEDED
:
1150 bfa_fcpim_stats(fcpim
, iocom_sqer_needed
);
1151 bfa_assert(rsp
->reuse_io_tag
== 0);
1152 evt
= BFA_IOIM_SM_SQRETRY
;
1155 case BFI_IOIM_STS_RES_FREE
:
1156 bfa_fcpim_stats(fcpim
, iocom_res_free
);
1157 evt
= BFA_IOIM_SM_FREE
;
1160 case BFI_IOIM_STS_HOST_ABORTED
:
1161 bfa_fcpim_stats(fcpim
, iocom_hostabrts
);
1162 if (rsp
->abort_tag
!= ioim
->abort_tag
) {
1163 bfa_trc(ioim
->bfa
, rsp
->abort_tag
);
1164 bfa_trc(ioim
->bfa
, ioim
->abort_tag
);
1168 if (rsp
->reuse_io_tag
)
1169 evt
= BFA_IOIM_SM_ABORT_COMP
;
1171 evt
= BFA_IOIM_SM_ABORT_DONE
;
1174 case BFI_IOIM_STS_UTAG
:
1175 bfa_fcpim_stats(fcpim
, iocom_utags
);
1176 evt
= BFA_IOIM_SM_COMP_UTAG
;
1183 bfa_sm_send_event(ioim
, evt
);
1187 bfa_ioim_good_comp_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1189 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1190 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
1191 struct bfa_ioim_s
*ioim
;
1194 iotag
= bfa_os_ntohs(rsp
->io_tag
);
1196 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
1197 bfa_assert(ioim
->iotag
== iotag
);
1199 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1200 bfa_sm_send_event(ioim
, BFA_IOIM_SM_COMP_GOOD
);
1204 * Called by itnim to clean up IO while going offline.
1207 bfa_ioim_cleanup(struct bfa_ioim_s
*ioim
)
1209 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1210 bfa_fcpim_stats(ioim
->fcpim
, io_cleanups
);
1212 ioim
->iosp
->tskim
= NULL
;
1213 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
1217 bfa_ioim_cleanup_tm(struct bfa_ioim_s
*ioim
, struct bfa_tskim_s
*tskim
)
1219 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1220 bfa_fcpim_stats(ioim
->fcpim
, io_tmaborts
);
1222 ioim
->iosp
->tskim
= tskim
;
1223 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
1227 * IOC failure handling.
1230 bfa_ioim_iocdisable(struct bfa_ioim_s
*ioim
)
1232 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HWFAIL
);
1236 * IO offline TOV popped. Fail the pending IO.
1239 bfa_ioim_tov(struct bfa_ioim_s
*ioim
)
1241 bfa_sm_send_event(ioim
, BFA_IOIM_SM_IOTOV
);
1251 * Allocate IOIM resource for initiator mode I/O request.
1254 bfa_ioim_alloc(struct bfa_s
*bfa
, struct bfad_ioim_s
*dio
,
1255 struct bfa_itnim_s
*itnim
, u16 nsges
)
1257 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1258 struct bfa_ioim_s
*ioim
;
1261 * alocate IOIM resource
1263 bfa_q_deq(&fcpim
->ioim_free_q
, &ioim
);
1265 bfa_fcpim_stats(fcpim
, no_iotags
);
1270 ioim
->itnim
= itnim
;
1271 ioim
->nsges
= nsges
;
1274 bfa_stats(fcpim
, total_ios
);
1275 bfa_stats(itnim
, ios
);
1276 fcpim
->ios_active
++;
1278 list_add_tail(&ioim
->qe
, &itnim
->io_q
);
1279 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1285 bfa_ioim_free(struct bfa_ioim_s
*ioim
)
1287 struct bfa_fcpim_mod_s
*fcpim
= ioim
->fcpim
;
1289 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1290 bfa_assert_fp(bfa_sm_cmp_state(ioim
, bfa_ioim_sm_uninit
));
1292 bfa_assert_fp(list_empty(&ioim
->sgpg_q
)
1293 || (ioim
->nsges
> BFI_SGE_INLINE
));
1295 if (ioim
->nsgpgs
> 0)
1296 bfa_sgpg_mfree(ioim
->bfa
, &ioim
->sgpg_q
, ioim
->nsgpgs
);
1298 bfa_stats(ioim
->itnim
, io_comps
);
1299 fcpim
->ios_active
--;
1301 list_del(&ioim
->qe
);
1302 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
1306 bfa_ioim_start(struct bfa_ioim_s
*ioim
)
1308 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1309 bfa_sm_send_event(ioim
, BFA_IOIM_SM_START
);
1313 * Driver I/O abort request.
1316 bfa_ioim_abort(struct bfa_ioim_s
*ioim
)
1318 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1319 bfa_fcpim_stats(ioim
->fcpim
, io_aborts
);
1320 bfa_sm_send_event(ioim
, BFA_IOIM_SM_ABORT
);