/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <cs/bfa_debug.h>
#include <bfa_cb_ioim_macros.h>

BFA_TRC_FILE(HAL, IOIM);

/*
 * forward declarations.
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void		__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);

/**
 *  bfa_ioim_sm
 */

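/*
 * Each IO request is driven through the state machine below. Events are
 * posted by the host (start/abort), by the firmware completion ISRs
 * (comp/done/free) and by the itnim/tskim cleanup paths.
 */
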
/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp with unknown tag */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup start from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup done from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};

/*
 * forward declaration of IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					  enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
				     enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);

/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
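		/*
		 * ITN is offline: either fail the IO back right away with
		 * PATHTOV status, or park it on the itnim pending queue if
		 * the itnim is holding IOs until its offline TOV pops.
		 */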
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					     __bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

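	/*
	 * Explicit abort (from the SCSI stack) and implicit abort (cleanup)
	 * send the same firmware abort request; they differ only in
	 * iosp->abort_explicit and the completion callback used.
	 */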
	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can already be in cleanup state due to a TM command;
		 * a second cleanup request can come from the ITN offline
		 * event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for room in request CQ.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is completed, waiting for the resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}


/**
 *  bfa_ioim_private
 */

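/*
 * The completion callbacks below run from the bfa callback queue.  When
 * invoked with complete == BFA_FALSE the callback is not delivered to
 * the driver; an HCB event is bounced back to the state machine instead.
 */
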
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
		    && m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER)
			residue = bfa_os_ntohl(m->residue);
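		/*
		 * an overrun is reported back as a negative residue
		 */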
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			 m->scsi_status, sns_len, snsinfo, residue);
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
			     BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
		      *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
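		/* fall through - RW uses the generic I/O opcode */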
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
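	/*
	 * Sequence-level error recovery and transfers that are not a
	 * multiple of a word also use the generic (non-read/write) opcode.
	 */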
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
				FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
			      bfa_cb_ioim_get_cdb(ioim->dio) + 1,
			      m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
			bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}

static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

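	/*
	 * Skip the first SG element; it was already sent as the inline SGE
	 * in the I/O request. SG pages carry the remaining elements.
	 */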
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}

/**
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build abort request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue abort message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_fcpim_stats(ioim->fcpim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}


static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

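	/*
	 * Cleanup not driven by a TM command: if completions are being
	 * delayed (itnim path TOV active), park the callback on the itnim
	 * delay queue so bfa_ioim_delayed_comp() can replay it later.
	 */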
	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}

/**
 * Complete delayed IOs, either when the path TOV timer pops
 * or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If the path tov timer expired, fail the IO back with PATHTOV
	 * status - these IO requests are not normally retried by the IO
	 * stack.
	 *
	 * Otherwise the device came back online; fail the IO with normal
	 * failed status so that the IO stack retries it.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else
		ioim->io_cbfn = __bfa_cb_ioim_failed;

	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}


/**
 *  bfa_ioim_friend
 */

/**
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}

/**
 * Driver detach time call.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}

void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

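	/*
	 * reuse_io_tag set means firmware has already released the IO tag,
	 * so the completion also frees the resource (COMP/ABORT_COMP);
	 * otherwise the tag is recycled only after a separate RES_FREE
	 * response (DONE/ABORT_DONE).
	 */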
	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}

void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/**
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}



/**
 *  bfa_ioim_api
 */

/**
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}

void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		      || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}

void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	/**
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
		     bfa_cb_ioim_get_reqq(ioim->dio) :
		     bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

/**
 * Driver I/O abort request.
 */
void
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
}