/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);
/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
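/*
 * Note: the tag-to-itnim lookup above masks with (num_itnims - 1), which
 * equals (tag % num_itnims) only when num_itnims is a power of two.
 */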
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim)	do {				\
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	WARN_ON(!list_empty(&(__itnim)->io_q));				\
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
} while (0)
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
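/*
 * The *_cb macros above dispatch itnim notifications: when the FCS is
 * attached (bfa->fcs is set) the callback is invoked synchronously;
 * otherwise it is deferred through bfa_cb_queue() and delivered later
 * from callback context via the __bfa_cb_itnim_* wrappers.
 */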
/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/*  itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/*  firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/*  deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/*  IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/*  second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/*  IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
};
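/*
 * Typical itnim lifecycle driven by these events (queue-full and failure
 * paths omitted), as implemented by the state machine below:
 *
 *   uninit --CREATE--> created --ONLINE--> fwcreate --FWRSP--> online
 *   online --OFFLINE--> cleanup_offline --CLEANUP--> fwdelete --FWRSP-->
 *   offline --DELETE--> uninit
 */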
/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/*  io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/*  io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/*  io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/*  io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/*  io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/*  io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/*  abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/*  abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/*  abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/*  CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/*  SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/*  sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/*  bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/*  IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/*  IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/*  ITN offline TOV */
};
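/*
 * Normal IO fast path through the state machine below:
 *   uninit --START--> active --COMP_GOOD--> hcb --HCB--> uninit
 * Aborts, cleanups and queue-full conditions branch into the remaining
 * states (sgalloc, qfull, abort, cleanup, hcb_free, resfree, cmnd_retry).
 */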
/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/*  TM command start */
	BFA_TSKIM_SM_DONE = 2,		/*  TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/*  resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/*  IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/*  BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/*  IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/*  TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion */
};
/*
 * forward declaration for BFA ITNIM functions
 */
static void	bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void	bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void	bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void	__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void	bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov(void *itnim_arg);
static void	bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
/*
 * forward declaration of ITNIM state machine
 */
static void	bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t	bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
/*
 * forward declaration of BFA IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void	__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
					struct scsi_lun lun);
static void	bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void	bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void	bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void	bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
/*
 * forward declaration of BFA TSKIM state machine
 */
static void	bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	bfa_itnim_meminfo(cfg, km_len, dm_len);

	/*
	 * IO memory
	 */
	if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	*km_len += cfg->fwcfg.num_ioim_reqs *
	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa		= bfa;
	fcpim->num_itnims	= cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs	= cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs	= cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov		= cfg->drvcfg.path_tov;
	fcpim->delay_comp	= cfg->drvcfg.delay_comp;
	fcpim->profile_comp	= NULL;
	fcpim->profile_start	= NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->path_tov / 1000;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}
/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Second level error recovery need.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
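/*
 * The cleanup above uses the worker-counter (wc) pattern: one bfa_wc_up()
 * per outstanding IO/TM, matched by a bfa_wc_down() in bfa_itnim_iodone()
 * or bfa_itnim_tskdone(). When the count drains, bfa_itnim_cleanp_comp()
 * fires and sends BFA_ITNIM_SM_CLEANUP to the itnim state machine.
 */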
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s	*bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	u16	i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
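/*
 * Request-queue flow control used by both senders above: bfa_reqq_next()
 * returns NULL when the queue is full, in which case the wait element is
 * registered and the caller returns BFA_FALSE, parking the state machine
 * in a *_qfull state until BFA_ITNIM_SM_QRESUME reissues the request.
 */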
/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}
/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
	}
}
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;

	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
}
static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
}
static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
/*
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32	i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queueing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
/*
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device came back online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
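/*
 * Illustrative sketch (not driver code): the two completion flavors chosen
 * above, as seen by a hypothetical caller draining itnim->delay_comp_q:
 *
 *	bfa_ioim_delayed_comp(ioim, BFA_TRUE);	// TOV expired -> PATHTOV,
 *						// mid-layer fails the path
 *	bfa_ioim_delayed_comp(ioim, BFA_FALSE);	// device back -> FAILED,
 *						// mid-layer retries the IO
 */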
/*
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s	*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16	i;
	u8	*snsinfo;
	u32	snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
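/*
 * Illustrative sketch (not driver code): the attach-time memory layout
 * carved out of the pre-sized meminfo block above, for num_ioim_reqs = N:
 *
 *	KVA:  [ N * struct bfa_ioim_s ][ N * struct bfa_ioim_sp_s ]
 *	DMA:  [ N * BFI_IOIM_SNSLEN sense buffers ]
 *
 * Each ioim[i] is paired with iosp[i] and the i-th sense buffer; the DMA
 * base is handed to firmware once via bfa_iocfc_set_snsbase().
 */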
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		WARN_ON(!rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		WARN_ON(rsp->reuse_io_tag != 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		WARN_ON(1);
	}

	bfa_sm_send_event(ioim, evt);
}
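/*
 * Illustrative summary (not driver code) of the status-to-event mapping in
 * bfa_ioim_isr(). reuse_io_tag tells whether firmware still holds the tag:
 *
 *	BFI_IOIM_STS_OK/ABORTED:   reuse == 0 -> SM_DONE, else SM_COMP
 *	BFI_IOIM_STS_PROTO_ERR:    SM_COMP      (tag always reused)
 *	BFI_IOIM_STS_SQER_NEEDED:  SM_SQRETRY   (tag never reused)
 *	BFI_IOIM_STS_RES_FREE:     SM_FREE
 *	BFI_IOIM_STS_HOST_ABORTED: SM_ABORT_COMP or SM_ABORT_DONE
 *	BFI_IOIM_STS_UTAG:         SM_COMP_UTAG
 */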
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
		      (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	ioim->iotag &= BFA_IOIM_IOTAG_MASK;
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
/*
 * Driver I/O abort request.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
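/*
 * Illustrative sketch (not driver code): the typical IOIM life cycle the
 * driver glue (bfad) runs through with these entry points:
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
 *	if (!ioim)
 *		return ...;		// out of IO tags, retry later
 *	bfa_ioim_start(ioim);		// kicks BFA_IOIM_SM_START
 *	...
 *	bfa_ioim_abort(ioim);		// SCSI EH; fails if not abortable
 *
 * bfa_ioim_free() is driven from the completion path (state machine and
 * callbacks), not called directly by the submission path.
 */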
/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * TM command is active, awaiting completion from firmware to clean up
 * IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * Task management command is active, awaiting room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}
static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
			  BFI_TSKIM_STS_FAILED);
}
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return !memcmp(&tskim->lun, &lun, sizeof(lun));

	default:
		WARN_ON(1);
	}

	return BFA_FALSE;
}
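/*
 * Illustrative sketch (not driver code): scope matching during TM cleanup.
 * A target reset claims every I/O on the nexus; LUN-scoped TMs only claim
 * I/O requests whose LUN matches:
 *
 *	struct scsi_lun scsilun;
 *
 *	int_to_scsilun(cmnd->device->lun, &scsilun);
 *	if (bfa_tskim_match_scope(tskim, scsilun))
 *		...	// move this ioim under the TM command
 */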
/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
/*
 * Cleanup IO requests gathered under a task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}
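/*
 * Illustrative sketch (not driver code) of the worker-counter (bfa_wc)
 * pattern used above, assuming the usual bfa_wc semantics where init takes
 * an initial reference that wait drops:
 *
 *	bfa_wc_init(&wc, done_cbfn, cbarg);	// count = 1
 *	for each io:  bfa_wc_up(&wc);		// count++
 *	bfa_wc_wait(&wc);			// count-- (drops init ref)
 *	// each IO cleanup completion calls bfa_wc_down(&wc) via
 *	// bfa_tskim_iodone(); done_cbfn runs when count reaches 0
 */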
/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
/*
 * Send an abort request to firmware to clean up an active TM.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}
/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
			       tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}