/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
19 #include <cs/bfa_debug.h>
20 #include <bfa_cb_ioim_macros.h>
22 BFA_TRC_FILE(HAL
, IOIM
);
25 * forward declarations.
27 static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
);
28 static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
);
29 static void bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
);
30 static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
);
31 static void bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
);
32 static void __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
);
33 static void __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
);
34 static void __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
);
35 static void __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
);
36 static void __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
);
43 * IO state machine events
46 BFA_IOIM_SM_START
= 1, /* io start request from host */
47 BFA_IOIM_SM_COMP_GOOD
= 2, /* io good comp, resource free */
48 BFA_IOIM_SM_COMP
= 3, /* io comp, resource is free */
49 BFA_IOIM_SM_COMP_UTAG
= 4, /* io comp, resource is free */
50 BFA_IOIM_SM_DONE
= 5, /* io comp, resource not free */
51 BFA_IOIM_SM_FREE
= 6, /* io resource is freed */
52 BFA_IOIM_SM_ABORT
= 7, /* abort request from scsi stack */
53 BFA_IOIM_SM_ABORT_COMP
= 8, /* abort from f/w */
54 BFA_IOIM_SM_ABORT_DONE
= 9, /* abort completion from f/w */
55 BFA_IOIM_SM_QRESUME
= 10, /* CQ space available to queue IO */
56 BFA_IOIM_SM_SGALLOCED
= 11, /* SG page allocation successful */
57 BFA_IOIM_SM_SQRETRY
= 12, /* sequence recovery retry */
58 BFA_IOIM_SM_HCB
= 13, /* bfa callback complete */
59 BFA_IOIM_SM_CLEANUP
= 14, /* IO cleanup from itnim */
60 BFA_IOIM_SM_TMSTART
= 15, /* IO cleanup from tskim */
61 BFA_IOIM_SM_TMDONE
= 16, /* IO cleanup from tskim */
62 BFA_IOIM_SM_HWFAIL
= 17, /* IOC h/w failure event */
63 BFA_IOIM_SM_IOTOV
= 18, /* ITN offline TOV */
67 * forward declaration of IO state machine
69 static void bfa_ioim_sm_uninit(struct bfa_ioim_s
*ioim
,
70 enum bfa_ioim_event event
);
71 static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s
*ioim
,
72 enum bfa_ioim_event event
);
73 static void bfa_ioim_sm_active(struct bfa_ioim_s
*ioim
,
74 enum bfa_ioim_event event
);
75 static void bfa_ioim_sm_abort(struct bfa_ioim_s
*ioim
,
76 enum bfa_ioim_event event
);
77 static void bfa_ioim_sm_cleanup(struct bfa_ioim_s
*ioim
,
78 enum bfa_ioim_event event
);
79 static void bfa_ioim_sm_qfull(struct bfa_ioim_s
*ioim
,
80 enum bfa_ioim_event event
);
81 static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s
*ioim
,
82 enum bfa_ioim_event event
);
83 static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s
*ioim
,
84 enum bfa_ioim_event event
);
85 static void bfa_ioim_sm_hcb(struct bfa_ioim_s
*ioim
,
86 enum bfa_ioim_event event
);
87 static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s
*ioim
,
88 enum bfa_ioim_event event
);
89 static void bfa_ioim_sm_resfree(struct bfa_ioim_s
*ioim
,
90 enum bfa_ioim_event event
);
93 * IO is not started (unallocated).
96 bfa_ioim_sm_uninit(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
98 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
99 bfa_trc_fp(ioim
->bfa
, event
);
102 case BFA_IOIM_SM_START
:
103 if (!bfa_itnim_is_online(ioim
->itnim
)) {
104 if (!bfa_itnim_hold_io(ioim
->itnim
)) {
105 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
107 list_add_tail(&ioim
->qe
,
108 &ioim
->fcpim
->ioim_comp_q
);
109 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
110 __bfa_cb_ioim_pathtov
, ioim
);
113 list_add_tail(&ioim
->qe
,
114 &ioim
->itnim
->pending_q
);
119 if (ioim
->nsges
> BFI_SGE_INLINE
) {
120 if (!bfa_ioim_sge_setup(ioim
)) {
121 bfa_sm_set_state(ioim
, bfa_ioim_sm_sgalloc
);
126 if (!bfa_ioim_send_ioreq(ioim
)) {
127 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
131 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
134 case BFA_IOIM_SM_IOTOV
:
135 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
137 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
138 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
139 __bfa_cb_ioim_pathtov
, ioim
);
142 case BFA_IOIM_SM_ABORT
:
144 * IO in pending queue can get abort requests. Complete abort
145 * requests immediately.
147 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
148 bfa_assert(bfa_q_is_on_q(&ioim
->itnim
->pending_q
, ioim
));
149 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
154 bfa_sm_fault(ioim
->bfa
, event
);
159 * IO is waiting for SG pages.
162 bfa_ioim_sm_sgalloc(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
164 bfa_trc(ioim
->bfa
, ioim
->iotag
);
165 bfa_trc(ioim
->bfa
, event
);
168 case BFA_IOIM_SM_SGALLOCED
:
169 if (!bfa_ioim_send_ioreq(ioim
)) {
170 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
173 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
176 case BFA_IOIM_SM_CLEANUP
:
177 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
178 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
179 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
181 bfa_ioim_notify_cleanup(ioim
);
184 case BFA_IOIM_SM_ABORT
:
185 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
186 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
188 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
189 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
193 case BFA_IOIM_SM_HWFAIL
:
194 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
195 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
197 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
198 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
203 bfa_sm_fault(ioim
->bfa
, event
);
211 bfa_ioim_sm_active(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
213 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
214 bfa_trc_fp(ioim
->bfa
, event
);
217 case BFA_IOIM_SM_COMP_GOOD
:
218 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
220 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
221 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
222 __bfa_cb_ioim_good_comp
, ioim
);
225 case BFA_IOIM_SM_COMP
:
226 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
228 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
229 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
233 case BFA_IOIM_SM_DONE
:
234 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
236 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
237 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
241 case BFA_IOIM_SM_ABORT
:
242 ioim
->iosp
->abort_explicit
= BFA_TRUE
;
243 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
245 if (bfa_ioim_send_abort(ioim
))
246 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
248 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort_qfull
);
249 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
250 &ioim
->iosp
->reqq_wait
);
254 case BFA_IOIM_SM_CLEANUP
:
255 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
256 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
258 if (bfa_ioim_send_abort(ioim
))
259 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
261 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
262 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
263 &ioim
->iosp
->reqq_wait
);
267 case BFA_IOIM_SM_HWFAIL
:
268 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
270 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
271 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
276 bfa_sm_fault(ioim
->bfa
, event
);
281 * IO is being aborted, waiting for completion from firmware.
284 bfa_ioim_sm_abort(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
286 bfa_trc(ioim
->bfa
, ioim
->iotag
);
287 bfa_trc(ioim
->bfa
, event
);
290 case BFA_IOIM_SM_COMP_GOOD
:
291 case BFA_IOIM_SM_COMP
:
292 case BFA_IOIM_SM_DONE
:
293 case BFA_IOIM_SM_FREE
:
296 case BFA_IOIM_SM_ABORT_DONE
:
297 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
298 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
302 case BFA_IOIM_SM_ABORT_COMP
:
303 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
305 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
306 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
310 case BFA_IOIM_SM_COMP_UTAG
:
311 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
313 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
314 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
318 case BFA_IOIM_SM_CLEANUP
:
319 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
320 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
322 if (bfa_ioim_send_abort(ioim
))
323 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
325 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
326 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
327 &ioim
->iosp
->reqq_wait
);
331 case BFA_IOIM_SM_HWFAIL
:
332 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
334 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
335 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
340 bfa_sm_fault(ioim
->bfa
, event
);
345 * IO is being cleaned up (implicit abort), waiting for completion from
349 bfa_ioim_sm_cleanup(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
351 bfa_trc(ioim
->bfa
, ioim
->iotag
);
352 bfa_trc(ioim
->bfa
, event
);
355 case BFA_IOIM_SM_COMP_GOOD
:
356 case BFA_IOIM_SM_COMP
:
357 case BFA_IOIM_SM_DONE
:
358 case BFA_IOIM_SM_FREE
:
361 case BFA_IOIM_SM_ABORT
:
363 * IO is already being aborted implicitly
365 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
368 case BFA_IOIM_SM_ABORT_DONE
:
369 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
370 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
371 bfa_ioim_notify_cleanup(ioim
);
374 case BFA_IOIM_SM_ABORT_COMP
:
375 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
376 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
377 bfa_ioim_notify_cleanup(ioim
);
380 case BFA_IOIM_SM_COMP_UTAG
:
381 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
382 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
383 bfa_ioim_notify_cleanup(ioim
);
386 case BFA_IOIM_SM_HWFAIL
:
387 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
389 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
390 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
394 case BFA_IOIM_SM_CLEANUP
:
396 * IO can be in cleanup state already due to TM command. 2nd cleanup
397 * request comes from ITN offline event.
402 bfa_sm_fault(ioim
->bfa
, event
);
407 * IO is waiting for room in request CQ
410 bfa_ioim_sm_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
412 bfa_trc(ioim
->bfa
, ioim
->iotag
);
413 bfa_trc(ioim
->bfa
, event
);
416 case BFA_IOIM_SM_QRESUME
:
417 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
418 bfa_ioim_send_ioreq(ioim
);
421 case BFA_IOIM_SM_ABORT
:
422 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
423 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
425 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
426 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
430 case BFA_IOIM_SM_CLEANUP
:
431 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
432 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
433 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
435 bfa_ioim_notify_cleanup(ioim
);
438 case BFA_IOIM_SM_HWFAIL
:
439 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
440 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
442 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
443 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
448 bfa_sm_fault(ioim
->bfa
, event
);
453 * Active IO is being aborted, waiting for room in request CQ.
456 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
458 bfa_trc(ioim
->bfa
, ioim
->iotag
);
459 bfa_trc(ioim
->bfa
, event
);
462 case BFA_IOIM_SM_QRESUME
:
463 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
464 bfa_ioim_send_abort(ioim
);
467 case BFA_IOIM_SM_CLEANUP
:
468 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
469 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
470 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
473 case BFA_IOIM_SM_COMP_GOOD
:
474 case BFA_IOIM_SM_COMP
:
475 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
476 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
478 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
479 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
483 case BFA_IOIM_SM_DONE
:
484 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
485 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
487 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
488 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
492 case BFA_IOIM_SM_HWFAIL
:
493 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
494 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
496 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
497 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
502 bfa_sm_fault(ioim
->bfa
, event
);
507 * Active IO is being cleaned up, waiting for room in request CQ.
510 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
512 bfa_trc(ioim
->bfa
, ioim
->iotag
);
513 bfa_trc(ioim
->bfa
, event
);
516 case BFA_IOIM_SM_QRESUME
:
517 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
518 bfa_ioim_send_abort(ioim
);
521 case BFA_IOIM_SM_ABORT
:
523 * IO is already being cleaned up implicitly
525 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
528 case BFA_IOIM_SM_COMP_GOOD
:
529 case BFA_IOIM_SM_COMP
:
530 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
531 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
532 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
533 bfa_ioim_notify_cleanup(ioim
);
536 case BFA_IOIM_SM_DONE
:
537 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
538 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
539 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
540 bfa_ioim_notify_cleanup(ioim
);
543 case BFA_IOIM_SM_HWFAIL
:
544 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
545 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
547 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
548 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
553 bfa_sm_fault(ioim
->bfa
, event
);
558 * IO bfa callback is pending.
561 bfa_ioim_sm_hcb(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
563 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
564 bfa_trc_fp(ioim
->bfa
, event
);
567 case BFA_IOIM_SM_HCB
:
568 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
570 bfa_cb_ioim_resfree(ioim
->bfa
->bfad
);
573 case BFA_IOIM_SM_CLEANUP
:
574 bfa_ioim_notify_cleanup(ioim
);
577 case BFA_IOIM_SM_HWFAIL
:
581 bfa_sm_fault(ioim
->bfa
, event
);
586 * IO bfa callback is pending. IO resource cannot be freed.
589 bfa_ioim_sm_hcb_free(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
591 bfa_trc(ioim
->bfa
, ioim
->iotag
);
592 bfa_trc(ioim
->bfa
, event
);
595 case BFA_IOIM_SM_HCB
:
596 bfa_sm_set_state(ioim
, bfa_ioim_sm_resfree
);
598 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_resfree_q
);
601 case BFA_IOIM_SM_FREE
:
602 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
605 case BFA_IOIM_SM_CLEANUP
:
606 bfa_ioim_notify_cleanup(ioim
);
609 case BFA_IOIM_SM_HWFAIL
:
610 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
614 bfa_sm_fault(ioim
->bfa
, event
);
619 * IO is completed, waiting resource free from firmware.
622 bfa_ioim_sm_resfree(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
624 bfa_trc(ioim
->bfa
, ioim
->iotag
);
625 bfa_trc(ioim
->bfa
, event
);
628 case BFA_IOIM_SM_FREE
:
629 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
631 bfa_cb_ioim_resfree(ioim
->bfa
->bfad
);
634 case BFA_IOIM_SM_CLEANUP
:
635 bfa_ioim_notify_cleanup(ioim
);
638 case BFA_IOIM_SM_HWFAIL
:
642 bfa_sm_fault(ioim
->bfa
, event
);
653 __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
)
655 struct bfa_ioim_s
*ioim
= cbarg
;
658 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
662 bfa_cb_ioim_good_comp(ioim
->bfa
->bfad
, ioim
->dio
);
666 __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
)
668 struct bfa_ioim_s
*ioim
= cbarg
;
669 struct bfi_ioim_rsp_s
*m
;
675 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
679 m
= (struct bfi_ioim_rsp_s
*) &ioim
->iosp
->comp_rspmsg
;
680 if (m
->io_status
== BFI_IOIM_STS_OK
) {
682 * setup sense information, if present
684 if (m
->scsi_status
== SCSI_STATUS_CHECK_CONDITION
686 sns_len
= m
->sns_len
;
687 snsinfo
= ioim
->iosp
->snsinfo
;
691 * setup residue value correctly for normal completions
693 if (m
->resid_flags
== FCP_RESID_UNDER
)
694 residue
= bfa_os_ntohl(m
->residue
);
695 if (m
->resid_flags
== FCP_RESID_OVER
) {
696 residue
= bfa_os_ntohl(m
->residue
);
701 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, m
->io_status
,
702 m
->scsi_status
, sns_len
, snsinfo
, residue
);
706 __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
)
708 struct bfa_ioim_s
*ioim
= cbarg
;
711 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
715 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_ABORTED
,
720 __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
)
722 struct bfa_ioim_s
*ioim
= cbarg
;
725 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
729 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_PATHTOV
,
734 __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
)
736 struct bfa_ioim_s
*ioim
= cbarg
;
739 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
743 bfa_cb_ioim_abort(ioim
->bfa
->bfad
, ioim
->dio
);
747 bfa_ioim_sgpg_alloced(void *cbarg
)
749 struct bfa_ioim_s
*ioim
= cbarg
;
751 ioim
->nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
752 list_splice_tail_init(&ioim
->iosp
->sgpg_wqe
.sgpg_q
, &ioim
->sgpg_q
);
753 bfa_ioim_sgpg_setup(ioim
);
754 bfa_sm_send_event(ioim
, BFA_IOIM_SM_SGALLOCED
);
758 * Send I/O request to firmware.
761 bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
)
763 struct bfa_itnim_s
*itnim
= ioim
->itnim
;
764 struct bfi_ioim_req_s
*m
;
765 static struct fcp_cmnd_s cmnd_z0
= { 0 };
766 struct bfi_sge_s
*sge
;
769 struct scatterlist
*sg
;
770 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
773 * check for room in queue to send request now
775 m
= bfa_reqq_next(ioim
->bfa
, ioim
->reqq
);
777 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
778 &ioim
->iosp
->reqq_wait
);
783 * build i/o request message next
785 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
786 m
->rport_hdl
= ioim
->itnim
->rport
->fw_handle
;
787 m
->io_timeout
= bfa_cb_ioim_get_timeout(ioim
->dio
);
790 * build inline IO SG element here
794 sg
= (struct scatterlist
*)scsi_sglist(cmnd
);
795 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
796 sge
->sga
= *(union bfi_addr_u
*) &addr
;
797 pgdlen
= sg_dma_len(sg
);
798 sge
->sg_len
= pgdlen
;
799 sge
->flags
= (ioim
->nsges
> BFI_SGE_INLINE
) ?
800 BFI_SGE_DATA_CPL
: BFI_SGE_DATA_LAST
;
805 if (ioim
->nsges
> BFI_SGE_INLINE
) {
806 sge
->sga
= ioim
->sgpg
->sgpg_pa
;
808 sge
->sga
.a32
.addr_lo
= 0;
809 sge
->sga
.a32
.addr_hi
= 0;
811 sge
->sg_len
= pgdlen
;
812 sge
->flags
= BFI_SGE_PGDLEN
;
816 * set up I/O command parameters
818 bfa_os_assign(m
->cmnd
, cmnd_z0
);
819 m
->cmnd
.lun
= bfa_cb_ioim_get_lun(ioim
->dio
);
820 m
->cmnd
.iodir
= bfa_cb_ioim_get_iodir(ioim
->dio
);
821 bfa_os_assign(m
->cmnd
.cdb
,
822 *(struct scsi_cdb_s
*)bfa_cb_ioim_get_cdb(ioim
->dio
));
823 m
->cmnd
.fcp_dl
= bfa_os_htonl(bfa_cb_ioim_get_size(ioim
->dio
));
826 * set up I/O message header
828 switch (m
->cmnd
.iodir
) {
830 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_READ
, 0, bfa_lpuid(ioim
->bfa
));
831 bfa_stats(itnim
, input_reqs
);
833 case FCP_IODIR_WRITE
:
834 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_WRITE
, 0, bfa_lpuid(ioim
->bfa
));
835 bfa_stats(itnim
, output_reqs
);
838 bfa_stats(itnim
, input_reqs
);
839 bfa_stats(itnim
, output_reqs
);
841 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
843 if (itnim
->seq_rec
||
844 (bfa_cb_ioim_get_size(ioim
->dio
) & (sizeof(u32
) - 1)))
845 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
848 m
->cmnd
.crn
= bfa_cb_ioim_get_crn(ioim
->dio
);
849 m
->cmnd
.priority
= bfa_cb_ioim_get_priority(ioim
->dio
);
850 m
->cmnd
.taskattr
= bfa_cb_ioim_get_taskattr(ioim
->dio
);
853 * Handle large CDB (>16 bytes).
855 m
->cmnd
.addl_cdb_len
= (bfa_cb_ioim_get_cdblen(ioim
->dio
) -
856 FCP_CMND_CDB_LEN
) / sizeof(u32
);
857 if (m
->cmnd
.addl_cdb_len
) {
858 bfa_os_memcpy(&m
->cmnd
.cdb
+ 1, (struct scsi_cdb_s
*)
859 bfa_cb_ioim_get_cdb(ioim
->dio
) + 1,
860 m
->cmnd
.addl_cdb_len
* sizeof(u32
));
861 fcp_cmnd_fcpdl(&m
->cmnd
) =
862 bfa_os_htonl(bfa_cb_ioim_get_size(ioim
->dio
));
867 * queue I/O message to firmware
869 bfa_reqq_produce(ioim
->bfa
, ioim
->reqq
);
874 * Setup any additional SG pages needed.Inline SG element is setup
878 bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
)
882 bfa_assert(ioim
->nsges
> BFI_SGE_INLINE
);
885 * allocate SG pages needed
887 nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
891 if (bfa_sgpg_malloc(ioim
->bfa
, &ioim
->sgpg_q
, nsgpgs
)
893 bfa_sgpg_wait(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
, nsgpgs
);
897 ioim
->nsgpgs
= nsgpgs
;
898 bfa_ioim_sgpg_setup(ioim
);
904 bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
)
907 struct bfi_sge_s
*sge
;
908 struct bfa_sgpg_s
*sgpg
;
911 struct scatterlist
*sg
;
912 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
914 sgeid
= BFI_SGE_INLINE
;
915 ioim
->sgpg
= sgpg
= bfa_q_first(&ioim
->sgpg_q
);
917 sg
= scsi_sglist(cmnd
);
921 sge
= sgpg
->sgpg
->sges
;
922 nsges
= ioim
->nsges
- sgeid
;
923 if (nsges
> BFI_SGPG_DATA_SGES
)
924 nsges
= BFI_SGPG_DATA_SGES
;
927 for (i
= 0; i
< nsges
; i
++, sge
++, sgeid
++, sg
= sg_next(sg
)) {
928 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
929 sge
->sga
= *(union bfi_addr_u
*) &addr
;
930 sge
->sg_len
= sg_dma_len(sg
);
931 pgcumsz
+= sge
->sg_len
;
937 sge
->flags
= BFI_SGE_DATA
;
938 else if (sgeid
< (ioim
->nsges
- 1))
939 sge
->flags
= BFI_SGE_DATA_CPL
;
941 sge
->flags
= BFI_SGE_DATA_LAST
;
944 sgpg
= (struct bfa_sgpg_s
*) bfa_q_next(sgpg
);
947 * set the link element of each page
949 if (sgeid
== ioim
->nsges
) {
950 sge
->flags
= BFI_SGE_PGDLEN
;
951 sge
->sga
.a32
.addr_lo
= 0;
952 sge
->sga
.a32
.addr_hi
= 0;
954 sge
->flags
= BFI_SGE_LINK
;
955 sge
->sga
= sgpg
->sgpg_pa
;
957 sge
->sg_len
= pgcumsz
;
958 } while (sgeid
< ioim
->nsges
);
962 * Send I/O abort request to firmware.
965 bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
)
967 struct bfi_ioim_abort_req_s
*m
;
968 enum bfi_ioim_h2i msgop
;
971 * check for room in queue to send request now
973 m
= bfa_reqq_next(ioim
->bfa
, ioim
->reqq
);
978 * build i/o request message next
980 if (ioim
->iosp
->abort_explicit
)
981 msgop
= BFI_IOIM_H2I_IOABORT_REQ
;
983 msgop
= BFI_IOIM_H2I_IOCLEANUP_REQ
;
985 bfi_h2i_set(m
->mh
, BFI_MC_IOIM
, msgop
, bfa_lpuid(ioim
->bfa
));
986 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
987 m
->abort_tag
= ++ioim
->abort_tag
;
990 * queue I/O message to firmware
992 bfa_reqq_produce(ioim
->bfa
, ioim
->reqq
);
997 * Call to resume any I/O requests waiting for room in request queue.
1000 bfa_ioim_qresume(void *cbarg
)
1002 struct bfa_ioim_s
*ioim
= cbarg
;
1004 bfa_fcpim_stats(ioim
->fcpim
, qresumes
);
1005 bfa_sm_send_event(ioim
, BFA_IOIM_SM_QRESUME
);
1010 bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
)
1013 * Move IO from itnim queue to fcpim global queue since itnim will be
1016 list_del(&ioim
->qe
);
1017 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
1019 if (!ioim
->iosp
->tskim
) {
1020 if (ioim
->fcpim
->delay_comp
&& ioim
->itnim
->iotov_active
) {
1021 bfa_cb_dequeue(&ioim
->hcb_qe
);
1022 list_del(&ioim
->qe
);
1023 list_add_tail(&ioim
->qe
, &ioim
->itnim
->delay_comp_q
);
1025 bfa_itnim_iodone(ioim
->itnim
);
1027 bfa_tskim_iodone(ioim
->iosp
->tskim
);
1031 * or after the link comes back.
1034 bfa_ioim_delayed_comp(struct bfa_ioim_s
*ioim
, bfa_boolean_t iotov
)
1037 * If path tov timer expired, failback with PATHTOV status - these
1038 * IO requests are not normally retried by IO stack.
1040 * Otherwise device cameback online and fail it with normal failed
1041 * status so that IO stack retries these failed IO requests.
1044 ioim
->io_cbfn
= __bfa_cb_ioim_pathtov
;
1046 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
1048 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
1051 * Move IO to fcpim global queue since itnim will be
1054 list_del(&ioim
->qe
);
1055 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
1065 * Memory allocation and initialization.
1068 bfa_ioim_attach(struct bfa_fcpim_mod_s
*fcpim
, struct bfa_meminfo_s
*minfo
)
1070 struct bfa_ioim_s
*ioim
;
1071 struct bfa_ioim_sp_s
*iosp
;
1077 * claim memory first
1079 ioim
= (struct bfa_ioim_s
*) bfa_meminfo_kva(minfo
);
1080 fcpim
->ioim_arr
= ioim
;
1081 bfa_meminfo_kva(minfo
) = (u8
*) (ioim
+ fcpim
->num_ioim_reqs
);
1083 iosp
= (struct bfa_ioim_sp_s
*) bfa_meminfo_kva(minfo
);
1084 fcpim
->ioim_sp_arr
= iosp
;
1085 bfa_meminfo_kva(minfo
) = (u8
*) (iosp
+ fcpim
->num_ioim_reqs
);
1088 * Claim DMA memory for per IO sense data.
1090 snsbufsz
= fcpim
->num_ioim_reqs
* BFI_IOIM_SNSLEN
;
1091 fcpim
->snsbase
.pa
= bfa_meminfo_dma_phys(minfo
);
1092 bfa_meminfo_dma_phys(minfo
) += snsbufsz
;
1094 fcpim
->snsbase
.kva
= bfa_meminfo_dma_virt(minfo
);
1095 bfa_meminfo_dma_virt(minfo
) += snsbufsz
;
1096 snsinfo
= fcpim
->snsbase
.kva
;
1097 bfa_iocfc_set_snsbase(fcpim
->bfa
, fcpim
->snsbase
.pa
);
1100 * Initialize ioim free queues
1102 INIT_LIST_HEAD(&fcpim
->ioim_free_q
);
1103 INIT_LIST_HEAD(&fcpim
->ioim_resfree_q
);
1104 INIT_LIST_HEAD(&fcpim
->ioim_comp_q
);
1106 for (i
= 0; i
< fcpim
->num_ioim_reqs
;
1107 i
++, ioim
++, iosp
++, snsinfo
+= BFI_IOIM_SNSLEN
) {
1111 bfa_os_memset(ioim
, 0, sizeof(struct bfa_ioim_s
));
1113 ioim
->bfa
= fcpim
->bfa
;
1114 ioim
->fcpim
= fcpim
;
1116 iosp
->snsinfo
= snsinfo
;
1117 INIT_LIST_HEAD(&ioim
->sgpg_q
);
1118 bfa_reqq_winit(&ioim
->iosp
->reqq_wait
,
1119 bfa_ioim_qresume
, ioim
);
1120 bfa_sgpg_winit(&ioim
->iosp
->sgpg_wqe
,
1121 bfa_ioim_sgpg_alloced
, ioim
);
1122 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
1124 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
1129 * Driver detach time call.
1132 bfa_ioim_detach(struct bfa_fcpim_mod_s
*fcpim
)
1137 bfa_ioim_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1139 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1140 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
1141 struct bfa_ioim_s
*ioim
;
1143 enum bfa_ioim_event evt
= BFA_IOIM_SM_COMP
;
1145 iotag
= bfa_os_ntohs(rsp
->io_tag
);
1147 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
1148 bfa_assert(ioim
->iotag
== iotag
);
1150 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1151 bfa_trc(ioim
->bfa
, rsp
->io_status
);
1152 bfa_trc(ioim
->bfa
, rsp
->reuse_io_tag
);
1154 if (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_active
))
1155 bfa_os_assign(ioim
->iosp
->comp_rspmsg
, *m
);
1157 switch (rsp
->io_status
) {
1158 case BFI_IOIM_STS_OK
:
1159 bfa_fcpim_stats(fcpim
, iocomp_ok
);
1160 if (rsp
->reuse_io_tag
== 0)
1161 evt
= BFA_IOIM_SM_DONE
;
1163 evt
= BFA_IOIM_SM_COMP
;
1166 case BFI_IOIM_STS_TIMEDOUT
:
1167 case BFI_IOIM_STS_ABORTED
:
1168 rsp
->io_status
= BFI_IOIM_STS_ABORTED
;
1169 bfa_fcpim_stats(fcpim
, iocomp_aborted
);
1170 if (rsp
->reuse_io_tag
== 0)
1171 evt
= BFA_IOIM_SM_DONE
;
1173 evt
= BFA_IOIM_SM_COMP
;
1176 case BFI_IOIM_STS_PROTO_ERR
:
1177 bfa_fcpim_stats(fcpim
, iocom_proto_err
);
1178 bfa_assert(rsp
->reuse_io_tag
);
1179 evt
= BFA_IOIM_SM_COMP
;
1182 case BFI_IOIM_STS_SQER_NEEDED
:
1183 bfa_fcpim_stats(fcpim
, iocom_sqer_needed
);
1184 bfa_assert(rsp
->reuse_io_tag
== 0);
1185 evt
= BFA_IOIM_SM_SQRETRY
;
1188 case BFI_IOIM_STS_RES_FREE
:
1189 bfa_fcpim_stats(fcpim
, iocom_res_free
);
1190 evt
= BFA_IOIM_SM_FREE
;
1193 case BFI_IOIM_STS_HOST_ABORTED
:
1194 bfa_fcpim_stats(fcpim
, iocom_hostabrts
);
1195 if (rsp
->abort_tag
!= ioim
->abort_tag
) {
1196 bfa_trc(ioim
->bfa
, rsp
->abort_tag
);
1197 bfa_trc(ioim
->bfa
, ioim
->abort_tag
);
1201 if (rsp
->reuse_io_tag
)
1202 evt
= BFA_IOIM_SM_ABORT_COMP
;
1204 evt
= BFA_IOIM_SM_ABORT_DONE
;
1207 case BFI_IOIM_STS_UTAG
:
1208 bfa_fcpim_stats(fcpim
, iocom_utags
);
1209 evt
= BFA_IOIM_SM_COMP_UTAG
;
1216 bfa_sm_send_event(ioim
, evt
);
1220 bfa_ioim_good_comp_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1222 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1223 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
1224 struct bfa_ioim_s
*ioim
;
1227 iotag
= bfa_os_ntohs(rsp
->io_tag
);
1229 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
1230 bfa_assert(ioim
->iotag
== iotag
);
1232 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1233 bfa_sm_send_event(ioim
, BFA_IOIM_SM_COMP_GOOD
);
1237 * Called by itnim to clean up IO while going offline.
1240 bfa_ioim_cleanup(struct bfa_ioim_s
*ioim
)
1242 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1243 bfa_fcpim_stats(ioim
->fcpim
, io_cleanups
);
1245 ioim
->iosp
->tskim
= NULL
;
1246 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
1250 bfa_ioim_cleanup_tm(struct bfa_ioim_s
*ioim
, struct bfa_tskim_s
*tskim
)
1252 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1253 bfa_fcpim_stats(ioim
->fcpim
, io_tmaborts
);
1255 ioim
->iosp
->tskim
= tskim
;
1256 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
1260 * IOC failure handling.
1263 bfa_ioim_iocdisable(struct bfa_ioim_s
*ioim
)
1265 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HWFAIL
);
1269 * IO offline TOV popped. Fail the pending IO.
1272 bfa_ioim_tov(struct bfa_ioim_s
*ioim
)
1274 bfa_sm_send_event(ioim
, BFA_IOIM_SM_IOTOV
);
1284 * Allocate IOIM resource for initiator mode I/O request.
1287 bfa_ioim_alloc(struct bfa_s
*bfa
, struct bfad_ioim_s
*dio
,
1288 struct bfa_itnim_s
*itnim
, u16 nsges
)
1290 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1291 struct bfa_ioim_s
*ioim
;
1294 * alocate IOIM resource
1296 bfa_q_deq(&fcpim
->ioim_free_q
, &ioim
);
1298 bfa_fcpim_stats(fcpim
, no_iotags
);
1303 ioim
->itnim
= itnim
;
1304 ioim
->nsges
= nsges
;
1307 bfa_stats(fcpim
, total_ios
);
1308 bfa_stats(itnim
, ios
);
1309 fcpim
->ios_active
++;
1311 list_add_tail(&ioim
->qe
, &itnim
->io_q
);
1312 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1318 bfa_ioim_free(struct bfa_ioim_s
*ioim
)
1320 struct bfa_fcpim_mod_s
*fcpim
= ioim
->fcpim
;
1322 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1323 bfa_assert_fp(bfa_sm_cmp_state(ioim
, bfa_ioim_sm_uninit
));
1325 bfa_assert_fp(list_empty(&ioim
->sgpg_q
)
1326 || (ioim
->nsges
> BFI_SGE_INLINE
));
1328 if (ioim
->nsgpgs
> 0)
1329 bfa_sgpg_mfree(ioim
->bfa
, &ioim
->sgpg_q
, ioim
->nsgpgs
);
1331 bfa_stats(ioim
->itnim
, io_comps
);
1332 fcpim
->ios_active
--;
1334 list_del(&ioim
->qe
);
1335 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
1339 bfa_ioim_start(struct bfa_ioim_s
*ioim
)
1341 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1344 * Obtain the queue over which this request has to be issued
1346 ioim
->reqq
= bfa_fcpim_ioredirect_enabled(ioim
->bfa
) ?
1347 bfa_cb_ioim_get_reqq(ioim
->dio
) :
1348 bfa_itnim_get_reqq(ioim
);
1350 bfa_sm_send_event(ioim
, BFA_IOIM_SM_START
);
1354 * Driver I/O abort request.
1357 bfa_ioim_abort(struct bfa_ioim_s
*ioim
)
1359 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1360 bfa_fcpim_stats(ioim
->fcpim
, io_aborts
);
1361 bfa_sm_send_event(ioim
, BFA_IOIM_SM_ABORT
);