2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include "bfa_modules.h"
19 #include "bfa_cb_ioim.h"
/* Register this file with the BFA trace facility under the HAL/FCPIM id. */
BFA_TRC_FILE(HAL, FCPIM);
/* Accumulate a single I/O statistics field from __r into __l. */
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	(__l->__stats += __r->__stats)
30 * BFA ITNIM Related definitions
32 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s
*itnim
);
/*
 * Map a firmware tag to its itnim array entry. num_itnims is a power of
 * two, so the mask keeps the index in range.
 */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
/* Add an itnim to the fcpim module's active itnim queue. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
/*
 * Remove an itnim from the module queue after folding its stats into the
 * deleted-itnim totals. All IO queues must already be empty.
 * NOTE(review): the "} while (0)" closer was lost in extraction and is
 * reconstructed here.
 */
#define bfa_fcpim_delitn(__itnim)	do {				\
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	bfa_assert(list_empty(&(__itnim)->io_q));			\
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));		\
	bfa_assert(list_empty(&(__itnim)->pending_q));			\
} while (0)
/*
 * Deliver the itnim-online callback: directly when FCS runs in the same
 * context, otherwise deferred through the callback queue.
 * NOTE(review): the "else" branch and "} while (0)" closer were lost in
 * extraction and are reconstructed.
 */
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)
/*
 * Deliver the itnim-offline callback: direct for same-context FCS,
 * otherwise via the deferred callback queue.
 * NOTE(review): "else" branch and closer reconstructed (lost in extraction).
 */
#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)
/*
 * Deliver the second-level-error-recovery callback: direct for
 * same-context FCS, otherwise via the deferred callback queue.
 * NOTE(review): "else" branch and closer reconstructed (lost in extraction).
 */
#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
76 * bfa_itnim_sm BFA itnim state machine
/*
 * Events consumed by the BFA itnim state machine.
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
93 * BFA IOIM related definitions
/*
 * Move an IO from whatever queue it is on to the module completion queue.
 * NOTE(review): "} while (0)" closer reconstructed (lost in extraction).
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)
/*
 * Invoke the IO-profiling completion hook if profiling is enabled.
 * NOTE(review): "} while (0)" closer reconstructed (lost in extraction).
 */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)
/*
 * Invoke the IO-profiling start hook if profiling is enabled.
 * NOTE(review): "} while (0)" closer reconstructed (lost in extraction).
 */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
115 * IO state machine events
/*
 * Events consumed by the BFA IO state machine.
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
140 * BFA TSKIM related definitions
144 * task management completion handling
/*
 * Queue a task-management completion callback and notify the owning itnim.
 * NOTE(review): "} while (0)" closer reconstructed (lost in extraction).
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)
/*
 * Tell the owning itnim a TM command finished, when notification was
 * requested.
 * NOTE(review): "} while (0)" closer reconstructed (lost in extraction).
 */
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)
/*
 * Events consumed by the BFA task-management state machine.
 * NOTE(review): value 4 is intentionally unassigned in the visible source;
 * preserved as-is to keep wire/trace values stable.
 */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
169 * forward declaration for BFA ITNIM functions
171 static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s
*itnim
);
172 static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s
*itnim
);
173 static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s
*itnim
);
174 static void bfa_itnim_cleanp_comp(void *itnim_cbarg
);
175 static void bfa_itnim_cleanup(struct bfa_itnim_s
*itnim
);
176 static void __bfa_cb_itnim_online(void *cbarg
, bfa_boolean_t complete
);
177 static void __bfa_cb_itnim_offline(void *cbarg
, bfa_boolean_t complete
);
178 static void __bfa_cb_itnim_sler(void *cbarg
, bfa_boolean_t complete
);
179 static void bfa_itnim_iotov_online(struct bfa_itnim_s
*itnim
);
180 static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s
*itnim
);
181 static void bfa_itnim_iotov(void *itnim_arg
);
182 static void bfa_itnim_iotov_start(struct bfa_itnim_s
*itnim
);
183 static void bfa_itnim_iotov_stop(struct bfa_itnim_s
*itnim
);
184 static void bfa_itnim_iotov_delete(struct bfa_itnim_s
*itnim
);
187 * forward declaration of ITNIM state machine
189 static void bfa_itnim_sm_uninit(struct bfa_itnim_s
*itnim
,
190 enum bfa_itnim_event event
);
191 static void bfa_itnim_sm_created(struct bfa_itnim_s
*itnim
,
192 enum bfa_itnim_event event
);
193 static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s
*itnim
,
194 enum bfa_itnim_event event
);
195 static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s
*itnim
,
196 enum bfa_itnim_event event
);
197 static void bfa_itnim_sm_online(struct bfa_itnim_s
*itnim
,
198 enum bfa_itnim_event event
);
199 static void bfa_itnim_sm_sler(struct bfa_itnim_s
*itnim
,
200 enum bfa_itnim_event event
);
201 static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s
*itnim
,
202 enum bfa_itnim_event event
);
203 static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s
*itnim
,
204 enum bfa_itnim_event event
);
205 static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s
*itnim
,
206 enum bfa_itnim_event event
);
207 static void bfa_itnim_sm_offline(struct bfa_itnim_s
*itnim
,
208 enum bfa_itnim_event event
);
209 static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s
*itnim
,
210 enum bfa_itnim_event event
);
211 static void bfa_itnim_sm_deleting(struct bfa_itnim_s
*itnim
,
212 enum bfa_itnim_event event
);
213 static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s
*itnim
,
214 enum bfa_itnim_event event
);
215 static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s
*itnim
,
216 enum bfa_itnim_event event
);
217 static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s
*itnim
,
218 enum bfa_itnim_event event
);
221 * forward declaration for BFA IOIM functions
223 static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
);
224 static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
);
225 static void bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
);
226 static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
);
227 static void bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
);
228 static void __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
);
229 static void __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
);
230 static void __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
);
231 static void __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
);
232 static void __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
);
233 static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s
*ioim
);
237 * forward declaration of BFA IO state machine
239 static void bfa_ioim_sm_uninit(struct bfa_ioim_s
*ioim
,
240 enum bfa_ioim_event event
);
241 static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s
*ioim
,
242 enum bfa_ioim_event event
);
243 static void bfa_ioim_sm_active(struct bfa_ioim_s
*ioim
,
244 enum bfa_ioim_event event
);
245 static void bfa_ioim_sm_abort(struct bfa_ioim_s
*ioim
,
246 enum bfa_ioim_event event
);
247 static void bfa_ioim_sm_cleanup(struct bfa_ioim_s
*ioim
,
248 enum bfa_ioim_event event
);
249 static void bfa_ioim_sm_qfull(struct bfa_ioim_s
*ioim
,
250 enum bfa_ioim_event event
);
251 static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s
*ioim
,
252 enum bfa_ioim_event event
);
253 static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s
*ioim
,
254 enum bfa_ioim_event event
);
255 static void bfa_ioim_sm_hcb(struct bfa_ioim_s
*ioim
,
256 enum bfa_ioim_event event
);
257 static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s
*ioim
,
258 enum bfa_ioim_event event
);
259 static void bfa_ioim_sm_resfree(struct bfa_ioim_s
*ioim
,
260 enum bfa_ioim_event event
);
261 static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s
*ioim
,
262 enum bfa_ioim_event event
);
265 * forward declaration for BFA TSKIM functions
267 static void __bfa_cb_tskim_done(void *cbarg
, bfa_boolean_t complete
);
268 static void __bfa_cb_tskim_failed(void *cbarg
, bfa_boolean_t complete
);
269 static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s
*tskim
,
271 static void bfa_tskim_gather_ios(struct bfa_tskim_s
*tskim
);
272 static void bfa_tskim_cleanp_comp(void *tskim_cbarg
);
273 static void bfa_tskim_cleanup_ios(struct bfa_tskim_s
*tskim
);
274 static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s
*tskim
);
275 static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s
*tskim
);
276 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s
*tskim
);
280 * forward declaration of BFA TSKIM state machine
282 static void bfa_tskim_sm_uninit(struct bfa_tskim_s
*tskim
,
283 enum bfa_tskim_event event
);
284 static void bfa_tskim_sm_active(struct bfa_tskim_s
*tskim
,
285 enum bfa_tskim_event event
);
286 static void bfa_tskim_sm_cleanup(struct bfa_tskim_s
*tskim
,
287 enum bfa_tskim_event event
);
288 static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s
*tskim
,
289 enum bfa_tskim_event event
);
290 static void bfa_tskim_sm_qfull(struct bfa_tskim_s
*tskim
,
291 enum bfa_tskim_event event
);
292 static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s
*tskim
,
293 enum bfa_tskim_event event
);
294 static void bfa_tskim_sm_hcb(struct bfa_tskim_s
*tskim
,
295 enum bfa_tskim_event event
);
298 * hal_fcpim_mod BFA FCP Initiator Mode module
302 * Compute and return memory needed by FCP(im) module.
305 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
308 bfa_itnim_meminfo(cfg
, km_len
, dm_len
);
313 if (cfg
->fwcfg
.num_ioim_reqs
< BFA_IOIM_MIN
)
314 cfg
->fwcfg
.num_ioim_reqs
= BFA_IOIM_MIN
;
315 else if (cfg
->fwcfg
.num_ioim_reqs
> BFA_IOIM_MAX
)
316 cfg
->fwcfg
.num_ioim_reqs
= BFA_IOIM_MAX
;
318 *km_len
+= cfg
->fwcfg
.num_ioim_reqs
*
319 (sizeof(struct bfa_ioim_s
) + sizeof(struct bfa_ioim_sp_s
));
321 *dm_len
+= cfg
->fwcfg
.num_ioim_reqs
* BFI_IOIM_SNSLEN
;
324 * task management command memory
326 if (cfg
->fwcfg
.num_tskim_reqs
< BFA_TSKIM_MIN
)
327 cfg
->fwcfg
.num_tskim_reqs
= BFA_TSKIM_MIN
;
328 *km_len
+= cfg
->fwcfg
.num_tskim_reqs
* sizeof(struct bfa_tskim_s
);
333 bfa_fcpim_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
334 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
336 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
338 bfa_trc(bfa
, cfg
->drvcfg
.path_tov
);
339 bfa_trc(bfa
, cfg
->fwcfg
.num_rports
);
340 bfa_trc(bfa
, cfg
->fwcfg
.num_ioim_reqs
);
341 bfa_trc(bfa
, cfg
->fwcfg
.num_tskim_reqs
);
344 fcpim
->num_itnims
= cfg
->fwcfg
.num_rports
;
345 fcpim
->num_ioim_reqs
= cfg
->fwcfg
.num_ioim_reqs
;
346 fcpim
->num_tskim_reqs
= cfg
->fwcfg
.num_tskim_reqs
;
347 fcpim
->path_tov
= cfg
->drvcfg
.path_tov
;
348 fcpim
->delay_comp
= cfg
->drvcfg
.delay_comp
;
349 fcpim
->profile_comp
= NULL
;
350 fcpim
->profile_start
= NULL
;
352 bfa_itnim_attach(fcpim
, meminfo
);
353 bfa_tskim_attach(fcpim
, meminfo
);
354 bfa_ioim_attach(fcpim
, meminfo
);
/*
 * Module detach: tear down the ioim and tskim sub-modules.
 * NOTE(review): "static void" reconstructed (lost in extraction).
 */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_ioim_detach(fcpim);
	bfa_tskim_detach(fcpim);
}
/*
 * Module start hook — no work to do for FCP(im).
 * NOTE(review): no body lines are visible in the extracted source; an empty
 * body is assumed — confirm against the original driver.
 */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
/*
 * Module stop hook — no work to do for FCP(im).
 * NOTE(review): no body lines are visible in the extracted source; an empty
 * body is assumed — confirm against the original driver.
 */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
377 bfa_fcpim_iocdisable(struct bfa_s
*bfa
)
379 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
380 struct bfa_itnim_s
*itnim
;
381 struct list_head
*qe
, *qen
;
383 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
384 itnim
= (struct bfa_itnim_s
*) qe
;
385 bfa_itnim_iocdisable(itnim
);
390 bfa_fcpim_add_stats(struct bfa_itnim_iostats_s
*lstats
,
391 struct bfa_itnim_iostats_s
*rstats
)
393 bfa_fcpim_add_iostats(lstats
, rstats
, total_ios
);
394 bfa_fcpim_add_iostats(lstats
, rstats
, qresumes
);
395 bfa_fcpim_add_iostats(lstats
, rstats
, no_iotags
);
396 bfa_fcpim_add_iostats(lstats
, rstats
, io_aborts
);
397 bfa_fcpim_add_iostats(lstats
, rstats
, no_tskims
);
398 bfa_fcpim_add_iostats(lstats
, rstats
, iocomp_ok
);
399 bfa_fcpim_add_iostats(lstats
, rstats
, iocomp_underrun
);
400 bfa_fcpim_add_iostats(lstats
, rstats
, iocomp_overrun
);
401 bfa_fcpim_add_iostats(lstats
, rstats
, iocomp_aborted
);
402 bfa_fcpim_add_iostats(lstats
, rstats
, iocomp_timedout
);
403 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_nexus_abort
);
404 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_proto_err
);
405 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_dif_err
);
406 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_sqer_needed
);
407 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_res_free
);
408 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_hostabrts
);
409 bfa_fcpim_add_iostats(lstats
, rstats
, iocom_utags
);
410 bfa_fcpim_add_iostats(lstats
, rstats
, io_cleanups
);
411 bfa_fcpim_add_iostats(lstats
, rstats
, io_tmaborts
);
412 bfa_fcpim_add_iostats(lstats
, rstats
, onlines
);
413 bfa_fcpim_add_iostats(lstats
, rstats
, offlines
);
414 bfa_fcpim_add_iostats(lstats
, rstats
, creates
);
415 bfa_fcpim_add_iostats(lstats
, rstats
, deletes
);
416 bfa_fcpim_add_iostats(lstats
, rstats
, create_comps
);
417 bfa_fcpim_add_iostats(lstats
, rstats
, delete_comps
);
418 bfa_fcpim_add_iostats(lstats
, rstats
, sler_events
);
419 bfa_fcpim_add_iostats(lstats
, rstats
, fw_create
);
420 bfa_fcpim_add_iostats(lstats
, rstats
, fw_delete
);
421 bfa_fcpim_add_iostats(lstats
, rstats
, ioc_disabled
);
422 bfa_fcpim_add_iostats(lstats
, rstats
, cleanup_comps
);
423 bfa_fcpim_add_iostats(lstats
, rstats
, tm_cmnds
);
424 bfa_fcpim_add_iostats(lstats
, rstats
, tm_fw_rsps
);
425 bfa_fcpim_add_iostats(lstats
, rstats
, tm_success
);
426 bfa_fcpim_add_iostats(lstats
, rstats
, tm_failures
);
427 bfa_fcpim_add_iostats(lstats
, rstats
, tm_io_comps
);
428 bfa_fcpim_add_iostats(lstats
, rstats
, tm_qresumes
);
429 bfa_fcpim_add_iostats(lstats
, rstats
, tm_iocdowns
);
430 bfa_fcpim_add_iostats(lstats
, rstats
, tm_cleanups
);
431 bfa_fcpim_add_iostats(lstats
, rstats
, tm_cleanup_comps
);
432 bfa_fcpim_add_iostats(lstats
, rstats
, io_comps
);
433 bfa_fcpim_add_iostats(lstats
, rstats
, input_reqs
);
434 bfa_fcpim_add_iostats(lstats
, rstats
, output_reqs
);
435 bfa_fcpim_add_iostats(lstats
, rstats
, rd_throughput
);
436 bfa_fcpim_add_iostats(lstats
, rstats
, wr_throughput
);
440 bfa_fcpim_path_tov_set(struct bfa_s
*bfa
, u16 path_tov
)
442 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
444 fcpim
->path_tov
= path_tov
* 1000;
445 if (fcpim
->path_tov
> BFA_FCPIM_PATHTOV_MAX
)
446 fcpim
->path_tov
= BFA_FCPIM_PATHTOV_MAX
;
450 bfa_fcpim_path_tov_get(struct bfa_s
*bfa
)
452 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
454 return fcpim
->path_tov
/ 1000;
458 bfa_fcpim_port_iostats(struct bfa_s
*bfa
, struct bfa_itnim_iostats_s
*stats
,
461 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
462 struct list_head
*qe
, *qen
;
463 struct bfa_itnim_s
*itnim
;
465 /* accumulate IO stats from itnim */
466 bfa_os_memset(stats
, 0, sizeof(struct bfa_itnim_iostats_s
));
467 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
468 itnim
= (struct bfa_itnim_s
*) qe
;
469 if (itnim
->rport
->rport_info
.lp_tag
!= lp_tag
)
471 bfa_fcpim_add_stats(stats
, &(itnim
->stats
));
473 return BFA_STATUS_OK
;
476 bfa_fcpim_get_modstats(struct bfa_s
*bfa
, struct bfa_itnim_iostats_s
*modstats
)
478 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
479 struct list_head
*qe
, *qen
;
480 struct bfa_itnim_s
*itnim
;
482 /* accumulate IO stats from itnim */
483 bfa_os_memset(modstats
, 0, sizeof(struct bfa_itnim_iostats_s
));
484 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
485 itnim
= (struct bfa_itnim_s
*) qe
;
486 bfa_fcpim_add_stats(modstats
, &(itnim
->stats
));
488 return BFA_STATUS_OK
;
492 bfa_fcpim_get_del_itn_stats(struct bfa_s
*bfa
,
493 struct bfa_fcpim_del_itn_stats_s
*modstats
)
495 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
497 *modstats
= fcpim
->del_itn_stats
;
499 return BFA_STATUS_OK
;
504 bfa_fcpim_profile_on(struct bfa_s
*bfa
, u32 time
)
506 struct bfa_itnim_s
*itnim
;
507 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
508 struct list_head
*qe
, *qen
;
510 /* accumulate IO stats from itnim */
511 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
512 itnim
= (struct bfa_itnim_s
*) qe
;
513 bfa_itnim_clear_stats(itnim
);
515 fcpim
->io_profile
= BFA_TRUE
;
516 fcpim
->io_profile_start_time
= time
;
517 fcpim
->profile_comp
= bfa_ioim_profile_comp
;
518 fcpim
->profile_start
= bfa_ioim_profile_start
;
520 return BFA_STATUS_OK
;
523 bfa_fcpim_profile_off(struct bfa_s
*bfa
)
525 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
526 fcpim
->io_profile
= BFA_FALSE
;
527 fcpim
->io_profile_start_time
= 0;
528 fcpim
->profile_comp
= NULL
;
529 fcpim
->profile_start
= NULL
;
530 return BFA_STATUS_OK
;
534 bfa_fcpim_port_clear_iostats(struct bfa_s
*bfa
, u8 lp_tag
)
536 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
537 struct list_head
*qe
, *qen
;
538 struct bfa_itnim_s
*itnim
;
540 /* clear IO stats from all active itnims */
541 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
542 itnim
= (struct bfa_itnim_s
*) qe
;
543 if (itnim
->rport
->rport_info
.lp_tag
!= lp_tag
)
545 bfa_itnim_clear_stats(itnim
);
547 return BFA_STATUS_OK
;
552 bfa_fcpim_clr_modstats(struct bfa_s
*bfa
)
554 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
555 struct list_head
*qe
, *qen
;
556 struct bfa_itnim_s
*itnim
;
558 /* clear IO stats from all active itnims */
559 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
560 itnim
= (struct bfa_itnim_s
*) qe
;
561 bfa_itnim_clear_stats(itnim
);
563 bfa_os_memset(&fcpim
->del_itn_stats
, 0,
564 sizeof(struct bfa_fcpim_del_itn_stats_s
));
566 return BFA_STATUS_OK
;
570 bfa_fcpim_qdepth_set(struct bfa_s
*bfa
, u16 q_depth
)
572 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
574 bfa_assert(q_depth
<= BFA_IOCFC_QDEPTH_MAX
);
576 fcpim
->q_depth
= q_depth
;
580 bfa_fcpim_qdepth_get(struct bfa_s
*bfa
)
582 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
584 return fcpim
->q_depth
;
588 bfa_fcpim_update_ioredirect(struct bfa_s
*bfa
)
590 bfa_boolean_t ioredirect
;
593 * IO redirection is turned off when QoS is enabled and vice versa
595 ioredirect
= bfa_fcport_is_qos_enabled(bfa
) ? BFA_FALSE
: BFA_TRUE
;
599 bfa_fcpim_set_ioredirect(struct bfa_s
*bfa
, bfa_boolean_t state
)
601 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
602 fcpim
->ioredirect
= state
;
608 * BFA ITNIM module state machine functions
612 * Beginning/unallocated state - no events expected.
615 bfa_itnim_sm_uninit(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
617 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
618 bfa_trc(itnim
->bfa
, event
);
621 case BFA_ITNIM_SM_CREATE
:
622 bfa_sm_set_state(itnim
, bfa_itnim_sm_created
);
623 itnim
->is_online
= BFA_FALSE
;
624 bfa_fcpim_additn(itnim
);
628 bfa_sm_fault(itnim
->bfa
, event
);
633 * Beginning state, only online event expected.
636 bfa_itnim_sm_created(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
638 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
639 bfa_trc(itnim
->bfa
, event
);
642 case BFA_ITNIM_SM_ONLINE
:
643 if (bfa_itnim_send_fwcreate(itnim
))
644 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate
);
646 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate_qfull
);
649 case BFA_ITNIM_SM_DELETE
:
650 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
651 bfa_fcpim_delitn(itnim
);
654 case BFA_ITNIM_SM_HWFAIL
:
655 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
659 bfa_sm_fault(itnim
->bfa
, event
);
664 * Waiting for itnim create response from firmware.
667 bfa_itnim_sm_fwcreate(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
669 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
670 bfa_trc(itnim
->bfa
, event
);
673 case BFA_ITNIM_SM_FWRSP
:
674 bfa_sm_set_state(itnim
, bfa_itnim_sm_online
);
675 itnim
->is_online
= BFA_TRUE
;
676 bfa_itnim_iotov_online(itnim
);
677 bfa_itnim_online_cb(itnim
);
680 case BFA_ITNIM_SM_DELETE
:
681 bfa_sm_set_state(itnim
, bfa_itnim_sm_delete_pending
);
684 case BFA_ITNIM_SM_OFFLINE
:
685 if (bfa_itnim_send_fwdelete(itnim
))
686 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwdelete
);
688 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwdelete_qfull
);
691 case BFA_ITNIM_SM_HWFAIL
:
692 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
696 bfa_sm_fault(itnim
->bfa
, event
);
701 bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s
*itnim
,
702 enum bfa_itnim_event event
)
704 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
705 bfa_trc(itnim
->bfa
, event
);
708 case BFA_ITNIM_SM_QRESUME
:
709 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate
);
710 bfa_itnim_send_fwcreate(itnim
);
713 case BFA_ITNIM_SM_DELETE
:
714 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
715 bfa_reqq_wcancel(&itnim
->reqq_wait
);
716 bfa_fcpim_delitn(itnim
);
719 case BFA_ITNIM_SM_OFFLINE
:
720 bfa_sm_set_state(itnim
, bfa_itnim_sm_offline
);
721 bfa_reqq_wcancel(&itnim
->reqq_wait
);
722 bfa_itnim_offline_cb(itnim
);
725 case BFA_ITNIM_SM_HWFAIL
:
726 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
727 bfa_reqq_wcancel(&itnim
->reqq_wait
);
731 bfa_sm_fault(itnim
->bfa
, event
);
736 * Waiting for itnim create response from firmware, a delete is pending.
739 bfa_itnim_sm_delete_pending(struct bfa_itnim_s
*itnim
,
740 enum bfa_itnim_event event
)
742 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
743 bfa_trc(itnim
->bfa
, event
);
746 case BFA_ITNIM_SM_FWRSP
:
747 if (bfa_itnim_send_fwdelete(itnim
))
748 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting
);
750 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting_qfull
);
753 case BFA_ITNIM_SM_HWFAIL
:
754 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
755 bfa_fcpim_delitn(itnim
);
759 bfa_sm_fault(itnim
->bfa
, event
);
764 * Online state - normal parking state.
767 bfa_itnim_sm_online(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
769 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
770 bfa_trc(itnim
->bfa
, event
);
773 case BFA_ITNIM_SM_OFFLINE
:
774 bfa_sm_set_state(itnim
, bfa_itnim_sm_cleanup_offline
);
775 itnim
->is_online
= BFA_FALSE
;
776 bfa_itnim_iotov_start(itnim
);
777 bfa_itnim_cleanup(itnim
);
780 case BFA_ITNIM_SM_DELETE
:
781 bfa_sm_set_state(itnim
, bfa_itnim_sm_cleanup_delete
);
782 itnim
->is_online
= BFA_FALSE
;
783 bfa_itnim_cleanup(itnim
);
786 case BFA_ITNIM_SM_SLER
:
787 bfa_sm_set_state(itnim
, bfa_itnim_sm_sler
);
788 itnim
->is_online
= BFA_FALSE
;
789 bfa_itnim_iotov_start(itnim
);
790 bfa_itnim_sler_cb(itnim
);
793 case BFA_ITNIM_SM_HWFAIL
:
794 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
795 itnim
->is_online
= BFA_FALSE
;
796 bfa_itnim_iotov_start(itnim
);
797 bfa_itnim_iocdisable_cleanup(itnim
);
801 bfa_sm_fault(itnim
->bfa
, event
);
806 * Second level error recovery need.
809 bfa_itnim_sm_sler(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
811 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
812 bfa_trc(itnim
->bfa
, event
);
815 case BFA_ITNIM_SM_OFFLINE
:
816 bfa_sm_set_state(itnim
, bfa_itnim_sm_cleanup_offline
);
817 bfa_itnim_cleanup(itnim
);
820 case BFA_ITNIM_SM_DELETE
:
821 bfa_sm_set_state(itnim
, bfa_itnim_sm_cleanup_delete
);
822 bfa_itnim_cleanup(itnim
);
823 bfa_itnim_iotov_delete(itnim
);
826 case BFA_ITNIM_SM_HWFAIL
:
827 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
828 bfa_itnim_iocdisable_cleanup(itnim
);
832 bfa_sm_fault(itnim
->bfa
, event
);
837 * Going offline. Waiting for active IO cleanup.
840 bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s
*itnim
,
841 enum bfa_itnim_event event
)
843 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
844 bfa_trc(itnim
->bfa
, event
);
847 case BFA_ITNIM_SM_CLEANUP
:
848 if (bfa_itnim_send_fwdelete(itnim
))
849 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwdelete
);
851 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwdelete_qfull
);
854 case BFA_ITNIM_SM_DELETE
:
855 bfa_sm_set_state(itnim
, bfa_itnim_sm_cleanup_delete
);
856 bfa_itnim_iotov_delete(itnim
);
859 case BFA_ITNIM_SM_HWFAIL
:
860 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
861 bfa_itnim_iocdisable_cleanup(itnim
);
862 bfa_itnim_offline_cb(itnim
);
865 case BFA_ITNIM_SM_SLER
:
869 bfa_sm_fault(itnim
->bfa
, event
);
874 * Deleting itnim. Waiting for active IO cleanup.
877 bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s
*itnim
,
878 enum bfa_itnim_event event
)
880 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
881 bfa_trc(itnim
->bfa
, event
);
884 case BFA_ITNIM_SM_CLEANUP
:
885 if (bfa_itnim_send_fwdelete(itnim
))
886 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting
);
888 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting_qfull
);
891 case BFA_ITNIM_SM_HWFAIL
:
892 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
893 bfa_itnim_iocdisable_cleanup(itnim
);
897 bfa_sm_fault(itnim
->bfa
, event
);
902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
905 bfa_itnim_sm_fwdelete(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
907 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
908 bfa_trc(itnim
->bfa
, event
);
911 case BFA_ITNIM_SM_FWRSP
:
912 bfa_sm_set_state(itnim
, bfa_itnim_sm_offline
);
913 bfa_itnim_offline_cb(itnim
);
916 case BFA_ITNIM_SM_DELETE
:
917 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting
);
920 case BFA_ITNIM_SM_HWFAIL
:
921 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
922 bfa_itnim_offline_cb(itnim
);
926 bfa_sm_fault(itnim
->bfa
, event
);
931 bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s
*itnim
,
932 enum bfa_itnim_event event
)
934 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
935 bfa_trc(itnim
->bfa
, event
);
938 case BFA_ITNIM_SM_QRESUME
:
939 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwdelete
);
940 bfa_itnim_send_fwdelete(itnim
);
943 case BFA_ITNIM_SM_DELETE
:
944 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting_qfull
);
947 case BFA_ITNIM_SM_HWFAIL
:
948 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
949 bfa_reqq_wcancel(&itnim
->reqq_wait
);
950 bfa_itnim_offline_cb(itnim
);
954 bfa_sm_fault(itnim
->bfa
, event
);
962 bfa_itnim_sm_offline(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
964 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
965 bfa_trc(itnim
->bfa
, event
);
968 case BFA_ITNIM_SM_DELETE
:
969 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
970 bfa_itnim_iotov_delete(itnim
);
971 bfa_fcpim_delitn(itnim
);
974 case BFA_ITNIM_SM_ONLINE
:
975 if (bfa_itnim_send_fwcreate(itnim
))
976 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate
);
978 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate_qfull
);
981 case BFA_ITNIM_SM_HWFAIL
:
982 bfa_sm_set_state(itnim
, bfa_itnim_sm_iocdisable
);
986 bfa_sm_fault(itnim
->bfa
, event
);
991 * IOC h/w failed state.
994 bfa_itnim_sm_iocdisable(struct bfa_itnim_s
*itnim
,
995 enum bfa_itnim_event event
)
997 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
998 bfa_trc(itnim
->bfa
, event
);
1001 case BFA_ITNIM_SM_DELETE
:
1002 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
1003 bfa_itnim_iotov_delete(itnim
);
1004 bfa_fcpim_delitn(itnim
);
1007 case BFA_ITNIM_SM_OFFLINE
:
1008 bfa_itnim_offline_cb(itnim
);
1011 case BFA_ITNIM_SM_ONLINE
:
1012 if (bfa_itnim_send_fwcreate(itnim
))
1013 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate
);
1015 bfa_sm_set_state(itnim
, bfa_itnim_sm_fwcreate_qfull
);
1018 case BFA_ITNIM_SM_HWFAIL
:
1022 bfa_sm_fault(itnim
->bfa
, event
);
1027 * Itnim is deleted, waiting for firmware response to delete.
1030 bfa_itnim_sm_deleting(struct bfa_itnim_s
*itnim
, enum bfa_itnim_event event
)
1032 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
1033 bfa_trc(itnim
->bfa
, event
);
1036 case BFA_ITNIM_SM_FWRSP
:
1037 case BFA_ITNIM_SM_HWFAIL
:
1038 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
1039 bfa_fcpim_delitn(itnim
);
1043 bfa_sm_fault(itnim
->bfa
, event
);
1048 bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s
*itnim
,
1049 enum bfa_itnim_event event
)
1051 bfa_trc(itnim
->bfa
, itnim
->rport
->rport_tag
);
1052 bfa_trc(itnim
->bfa
, event
);
1055 case BFA_ITNIM_SM_QRESUME
:
1056 bfa_sm_set_state(itnim
, bfa_itnim_sm_deleting
);
1057 bfa_itnim_send_fwdelete(itnim
);
1060 case BFA_ITNIM_SM_HWFAIL
:
1061 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
1062 bfa_reqq_wcancel(&itnim
->reqq_wait
);
1063 bfa_fcpim_delitn(itnim
);
1067 bfa_sm_fault(itnim
->bfa
, event
);
1072 * Initiate cleanup of all IOs on an IOC failure.
1075 bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s
*itnim
)
1077 struct bfa_tskim_s
*tskim
;
1078 struct bfa_ioim_s
*ioim
;
1079 struct list_head
*qe
, *qen
;
1081 list_for_each_safe(qe
, qen
, &itnim
->tsk_q
) {
1082 tskim
= (struct bfa_tskim_s
*) qe
;
1083 bfa_tskim_iocdisable(tskim
);
1086 list_for_each_safe(qe
, qen
, &itnim
->io_q
) {
1087 ioim
= (struct bfa_ioim_s
*) qe
;
1088 bfa_ioim_iocdisable(ioim
);
1092 * For IO request in pending queue, we pretend an early timeout.
1094 list_for_each_safe(qe
, qen
, &itnim
->pending_q
) {
1095 ioim
= (struct bfa_ioim_s
*) qe
;
1099 list_for_each_safe(qe
, qen
, &itnim
->io_cleanup_q
) {
1100 ioim
= (struct bfa_ioim_s
*) qe
;
1101 bfa_ioim_iocdisable(ioim
);
1106 * IO cleanup completion
1109 bfa_itnim_cleanp_comp(void *itnim_cbarg
)
1111 struct bfa_itnim_s
*itnim
= itnim_cbarg
;
1113 bfa_stats(itnim
, cleanup_comps
);
1114 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_CLEANUP
);
1118 * Initiate cleanup of all IOs.
1121 bfa_itnim_cleanup(struct bfa_itnim_s
*itnim
)
1123 struct bfa_ioim_s
*ioim
;
1124 struct bfa_tskim_s
*tskim
;
1125 struct list_head
*qe
, *qen
;
1127 bfa_wc_init(&itnim
->wc
, bfa_itnim_cleanp_comp
, itnim
);
1129 list_for_each_safe(qe
, qen
, &itnim
->io_q
) {
1130 ioim
= (struct bfa_ioim_s
*) qe
;
1133 * Move IO to a cleanup queue from active queue so that a later
1134 * TM will not pickup this IO.
1136 list_del(&ioim
->qe
);
1137 list_add_tail(&ioim
->qe
, &itnim
->io_cleanup_q
);
1139 bfa_wc_up(&itnim
->wc
);
1140 bfa_ioim_cleanup(ioim
);
1143 list_for_each_safe(qe
, qen
, &itnim
->tsk_q
) {
1144 tskim
= (struct bfa_tskim_s
*) qe
;
1145 bfa_wc_up(&itnim
->wc
);
1146 bfa_tskim_cleanup(tskim
);
1149 bfa_wc_wait(&itnim
->wc
);
1153 __bfa_cb_itnim_online(void *cbarg
, bfa_boolean_t complete
)
1155 struct bfa_itnim_s
*itnim
= cbarg
;
1158 bfa_cb_itnim_online(itnim
->ditn
);
1162 __bfa_cb_itnim_offline(void *cbarg
, bfa_boolean_t complete
)
1164 struct bfa_itnim_s
*itnim
= cbarg
;
1167 bfa_cb_itnim_offline(itnim
->ditn
);
1171 __bfa_cb_itnim_sler(void *cbarg
, bfa_boolean_t complete
)
1173 struct bfa_itnim_s
*itnim
= cbarg
;
1176 bfa_cb_itnim_sler(itnim
->ditn
);
1180 * Call to resume any I/O requests waiting for room in request queue.
1183 bfa_itnim_qresume(void *cbarg
)
1185 struct bfa_itnim_s
*itnim
= cbarg
;
1187 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_QRESUME
);
1198 bfa_itnim_iodone(struct bfa_itnim_s
*itnim
)
1200 bfa_wc_down(&itnim
->wc
);
1204 bfa_itnim_tskdone(struct bfa_itnim_s
*itnim
)
1206 bfa_wc_down(&itnim
->wc
);
1210 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
1216 *km_len
+= cfg
->fwcfg
.num_rports
* sizeof(struct bfa_itnim_s
);
1220 bfa_itnim_attach(struct bfa_fcpim_mod_s
*fcpim
, struct bfa_meminfo_s
*minfo
)
1222 struct bfa_s
*bfa
= fcpim
->bfa
;
1223 struct bfa_itnim_s
*itnim
;
1226 INIT_LIST_HEAD(&fcpim
->itnim_q
);
1228 itnim
= (struct bfa_itnim_s
*) bfa_meminfo_kva(minfo
);
1229 fcpim
->itnim_arr
= itnim
;
1231 for (i
= 0; i
< fcpim
->num_itnims
; i
++, itnim
++) {
1232 bfa_os_memset(itnim
, 0, sizeof(struct bfa_itnim_s
));
1234 itnim
->fcpim
= fcpim
;
1235 itnim
->reqq
= BFA_REQQ_QOS_LO
;
1236 itnim
->rport
= BFA_RPORT_FROM_TAG(bfa
, i
);
1237 itnim
->iotov_active
= BFA_FALSE
;
1238 bfa_reqq_winit(&itnim
->reqq_wait
, bfa_itnim_qresume
, itnim
);
1240 INIT_LIST_HEAD(&itnim
->io_q
);
1241 INIT_LIST_HEAD(&itnim
->io_cleanup_q
);
1242 INIT_LIST_HEAD(&itnim
->pending_q
);
1243 INIT_LIST_HEAD(&itnim
->tsk_q
);
1244 INIT_LIST_HEAD(&itnim
->delay_comp_q
);
1245 for (j
= 0; j
< BFA_IOBUCKET_MAX
; j
++)
1246 itnim
->ioprofile
.io_latency
.min
[j
] = ~0;
1247 bfa_sm_set_state(itnim
, bfa_itnim_sm_uninit
);
1250 bfa_meminfo_kva(minfo
) = (u8
*) itnim
;
1254 bfa_itnim_iocdisable(struct bfa_itnim_s
*itnim
)
1256 bfa_stats(itnim
, ioc_disabled
);
1257 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_HWFAIL
);
1260 static bfa_boolean_t
1261 bfa_itnim_send_fwcreate(struct bfa_itnim_s
*itnim
)
1263 struct bfi_itnim_create_req_s
*m
;
1268 * check for room in queue to send request now
1270 m
= bfa_reqq_next(itnim
->bfa
, itnim
->reqq
);
1272 bfa_reqq_wait(itnim
->bfa
, itnim
->reqq
, &itnim
->reqq_wait
);
1276 bfi_h2i_set(m
->mh
, BFI_MC_ITNIM
, BFI_ITNIM_H2I_CREATE_REQ
,
1277 bfa_lpuid(itnim
->bfa
));
1278 m
->fw_handle
= itnim
->rport
->fw_handle
;
1279 m
->class = FC_CLASS_3
;
1280 m
->seq_rec
= itnim
->seq_rec
;
1281 m
->msg_no
= itnim
->msg_no
;
1282 bfa_stats(itnim
, fw_create
);
1285 * queue I/O message to firmware
1287 bfa_reqq_produce(itnim
->bfa
, itnim
->reqq
);
1291 static bfa_boolean_t
1292 bfa_itnim_send_fwdelete(struct bfa_itnim_s
*itnim
)
1294 struct bfi_itnim_delete_req_s
*m
;
1297 * check for room in queue to send request now
1299 m
= bfa_reqq_next(itnim
->bfa
, itnim
->reqq
);
1301 bfa_reqq_wait(itnim
->bfa
, itnim
->reqq
, &itnim
->reqq_wait
);
1305 bfi_h2i_set(m
->mh
, BFI_MC_ITNIM
, BFI_ITNIM_H2I_DELETE_REQ
,
1306 bfa_lpuid(itnim
->bfa
));
1307 m
->fw_handle
= itnim
->rport
->fw_handle
;
1308 bfa_stats(itnim
, fw_delete
);
1311 * queue I/O message to firmware
1313 bfa_reqq_produce(itnim
->bfa
, itnim
->reqq
);
1318 * Cleanup all pending failed inflight requests.
1321 bfa_itnim_delayed_comp(struct bfa_itnim_s
*itnim
, bfa_boolean_t iotov
)
1323 struct bfa_ioim_s
*ioim
;
1324 struct list_head
*qe
, *qen
;
1326 list_for_each_safe(qe
, qen
, &itnim
->delay_comp_q
) {
1327 ioim
= (struct bfa_ioim_s
*)qe
;
1328 bfa_ioim_delayed_comp(ioim
, iotov
);
1333 * Start all pending IO requests.
1336 bfa_itnim_iotov_online(struct bfa_itnim_s
*itnim
)
1338 struct bfa_ioim_s
*ioim
;
1340 bfa_itnim_iotov_stop(itnim
);
1343 * Abort all inflight IO requests in the queue
1345 bfa_itnim_delayed_comp(itnim
, BFA_FALSE
);
1348 * Start all pending IO requests.
1350 while (!list_empty(&itnim
->pending_q
)) {
1351 bfa_q_deq(&itnim
->pending_q
, &ioim
);
1352 list_add_tail(&ioim
->qe
, &itnim
->io_q
);
1353 bfa_ioim_start(ioim
);
1358 * Fail all pending IO requests
1361 bfa_itnim_iotov_cleanup(struct bfa_itnim_s
*itnim
)
1363 struct bfa_ioim_s
*ioim
;
1366 * Fail all inflight IO requests in the queue
1368 bfa_itnim_delayed_comp(itnim
, BFA_TRUE
);
1371 * Fail any pending IO requests.
1373 while (!list_empty(&itnim
->pending_q
)) {
1374 bfa_q_deq(&itnim
->pending_q
, &ioim
);
1375 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
1381 * IO TOV timer callback. Fail any pending IO requests.
1384 bfa_itnim_iotov(void *itnim_arg
)
1386 struct bfa_itnim_s
*itnim
= itnim_arg
;
1388 itnim
->iotov_active
= BFA_FALSE
;
1390 bfa_cb_itnim_tov_begin(itnim
->ditn
);
1391 bfa_itnim_iotov_cleanup(itnim
);
1392 bfa_cb_itnim_tov(itnim
->ditn
);
1396 * Start IO TOV timer for failing back pending IO requests in offline state.
1399 bfa_itnim_iotov_start(struct bfa_itnim_s
*itnim
)
1401 if (itnim
->fcpim
->path_tov
> 0) {
1403 itnim
->iotov_active
= BFA_TRUE
;
1404 bfa_assert(bfa_itnim_hold_io(itnim
));
1405 bfa_timer_start(itnim
->bfa
, &itnim
->timer
,
1406 bfa_itnim_iotov
, itnim
, itnim
->fcpim
->path_tov
);
1411 * Stop IO TOV timer.
1414 bfa_itnim_iotov_stop(struct bfa_itnim_s
*itnim
)
1416 if (itnim
->iotov_active
) {
1417 itnim
->iotov_active
= BFA_FALSE
;
1418 bfa_timer_stop(&itnim
->timer
);
1423 * Stop IO TOV timer.
1426 bfa_itnim_iotov_delete(struct bfa_itnim_s
*itnim
)
1428 bfa_boolean_t pathtov_active
= BFA_FALSE
;
1430 if (itnim
->iotov_active
)
1431 pathtov_active
= BFA_TRUE
;
1433 bfa_itnim_iotov_stop(itnim
);
1435 bfa_cb_itnim_tov_begin(itnim
->ditn
);
1436 bfa_itnim_iotov_cleanup(itnim
);
1438 bfa_cb_itnim_tov(itnim
->ditn
);
1442 bfa_itnim_update_del_itn_stats(struct bfa_itnim_s
*itnim
)
1444 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(itnim
->bfa
);
1445 fcpim
->del_itn_stats
.del_itn_iocomp_aborted
+=
1446 itnim
->stats
.iocomp_aborted
;
1447 fcpim
->del_itn_stats
.del_itn_iocomp_timedout
+=
1448 itnim
->stats
.iocomp_timedout
;
1449 fcpim
->del_itn_stats
.del_itn_iocom_sqer_needed
+=
1450 itnim
->stats
.iocom_sqer_needed
;
1451 fcpim
->del_itn_stats
.del_itn_iocom_res_free
+=
1452 itnim
->stats
.iocom_res_free
;
1453 fcpim
->del_itn_stats
.del_itn_iocom_hostabrts
+=
1454 itnim
->stats
.iocom_hostabrts
;
1455 fcpim
->del_itn_stats
.del_itn_total_ios
+= itnim
->stats
.total_ios
;
1456 fcpim
->del_itn_stats
.del_io_iocdowns
+= itnim
->stats
.io_iocdowns
;
1457 fcpim
->del_itn_stats
.del_tm_iocdowns
+= itnim
->stats
.tm_iocdowns
;
1467 * Itnim interrupt processing.
1470 bfa_itnim_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1472 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1473 union bfi_itnim_i2h_msg_u msg
;
1474 struct bfa_itnim_s
*itnim
;
1476 bfa_trc(bfa
, m
->mhdr
.msg_id
);
1480 switch (m
->mhdr
.msg_id
) {
1481 case BFI_ITNIM_I2H_CREATE_RSP
:
1482 itnim
= BFA_ITNIM_FROM_TAG(fcpim
,
1483 msg
.create_rsp
->bfa_handle
);
1484 bfa_assert(msg
.create_rsp
->status
== BFA_STATUS_OK
);
1485 bfa_stats(itnim
, create_comps
);
1486 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_FWRSP
);
1489 case BFI_ITNIM_I2H_DELETE_RSP
:
1490 itnim
= BFA_ITNIM_FROM_TAG(fcpim
,
1491 msg
.delete_rsp
->bfa_handle
);
1492 bfa_assert(msg
.delete_rsp
->status
== BFA_STATUS_OK
);
1493 bfa_stats(itnim
, delete_comps
);
1494 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_FWRSP
);
1497 case BFI_ITNIM_I2H_SLER_EVENT
:
1498 itnim
= BFA_ITNIM_FROM_TAG(fcpim
,
1499 msg
.sler_event
->bfa_handle
);
1500 bfa_stats(itnim
, sler_events
);
1501 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_SLER
);
1505 bfa_trc(bfa
, m
->mhdr
.msg_id
);
1516 struct bfa_itnim_s
*
1517 bfa_itnim_create(struct bfa_s
*bfa
, struct bfa_rport_s
*rport
, void *ditn
)
1519 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
1520 struct bfa_itnim_s
*itnim
;
1522 itnim
= BFA_ITNIM_FROM_TAG(fcpim
, rport
->rport_tag
);
1523 bfa_assert(itnim
->rport
== rport
);
1527 bfa_stats(itnim
, creates
);
1528 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_CREATE
);
1534 bfa_itnim_delete(struct bfa_itnim_s
*itnim
)
1536 bfa_stats(itnim
, deletes
);
1537 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_DELETE
);
1541 bfa_itnim_online(struct bfa_itnim_s
*itnim
, bfa_boolean_t seq_rec
)
1543 itnim
->seq_rec
= seq_rec
;
1544 bfa_stats(itnim
, onlines
);
1545 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_ONLINE
);
1549 bfa_itnim_offline(struct bfa_itnim_s
*itnim
)
1551 bfa_stats(itnim
, offlines
);
1552 bfa_sm_send_event(itnim
, BFA_ITNIM_SM_OFFLINE
);
1556 * Return true if itnim is considered offline for holding off IO request.
1557 * IO is not held if itnim is being deleted.
1560 bfa_itnim_hold_io(struct bfa_itnim_s
*itnim
)
1562 return itnim
->fcpim
->path_tov
&& itnim
->iotov_active
&&
1563 (bfa_sm_cmp_state(itnim
, bfa_itnim_sm_fwcreate
) ||
1564 bfa_sm_cmp_state(itnim
, bfa_itnim_sm_sler
) ||
1565 bfa_sm_cmp_state(itnim
, bfa_itnim_sm_cleanup_offline
) ||
1566 bfa_sm_cmp_state(itnim
, bfa_itnim_sm_fwdelete
) ||
1567 bfa_sm_cmp_state(itnim
, bfa_itnim_sm_offline
) ||
1568 bfa_sm_cmp_state(itnim
, bfa_itnim_sm_iocdisable
));
1572 bfa_itnim_get_ioprofile(struct bfa_itnim_s
*itnim
,
1573 struct bfa_itnim_ioprofile_s
*ioprofile
)
1575 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(itnim
->bfa
);
1576 if (!fcpim
->io_profile
)
1577 return BFA_STATUS_IOPROFILE_OFF
;
1579 itnim
->ioprofile
.index
= BFA_IOBUCKET_MAX
;
1580 itnim
->ioprofile
.io_profile_start_time
=
1581 bfa_io_profile_start_time(itnim
->bfa
);
1582 itnim
->ioprofile
.clock_res_mul
= bfa_io_lat_clock_res_mul
;
1583 itnim
->ioprofile
.clock_res_div
= bfa_io_lat_clock_res_div
;
1584 *ioprofile
= itnim
->ioprofile
;
1586 return BFA_STATUS_OK
;
1590 bfa_itnim_get_stats(struct bfa_itnim_s
*itnim
,
1591 struct bfa_itnim_iostats_s
*stats
)
1593 *stats
= itnim
->stats
;
1597 bfa_itnim_clear_stats(struct bfa_itnim_s
*itnim
)
1600 bfa_os_memset(&itnim
->stats
, 0, sizeof(itnim
->stats
));
1601 bfa_os_memset(&itnim
->ioprofile
, 0, sizeof(itnim
->ioprofile
));
1602 for (j
= 0; j
< BFA_IOBUCKET_MAX
; j
++)
1603 itnim
->ioprofile
.io_latency
.min
[j
] = ~0;
1607 * BFA IO module state machine functions
1611 * IO is not started (unallocated).
1614 bfa_ioim_sm_uninit(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1616 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1617 bfa_trc_fp(ioim
->bfa
, event
);
1620 case BFA_IOIM_SM_START
:
1621 if (!bfa_itnim_is_online(ioim
->itnim
)) {
1622 if (!bfa_itnim_hold_io(ioim
->itnim
)) {
1623 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1624 list_del(&ioim
->qe
);
1625 list_add_tail(&ioim
->qe
,
1626 &ioim
->fcpim
->ioim_comp_q
);
1627 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1628 __bfa_cb_ioim_pathtov
, ioim
);
1630 list_del(&ioim
->qe
);
1631 list_add_tail(&ioim
->qe
,
1632 &ioim
->itnim
->pending_q
);
1637 if (ioim
->nsges
> BFI_SGE_INLINE
) {
1638 if (!bfa_ioim_sge_setup(ioim
)) {
1639 bfa_sm_set_state(ioim
, bfa_ioim_sm_sgalloc
);
1644 if (!bfa_ioim_send_ioreq(ioim
)) {
1645 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
1649 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
1652 case BFA_IOIM_SM_IOTOV
:
1653 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1654 bfa_ioim_move_to_comp_q(ioim
);
1655 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1656 __bfa_cb_ioim_pathtov
, ioim
);
1659 case BFA_IOIM_SM_ABORT
:
1661 * IO in pending queue can get abort requests. Complete abort
1662 * requests immediately.
1664 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1665 bfa_assert(bfa_q_is_on_q(&ioim
->itnim
->pending_q
, ioim
));
1666 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1667 __bfa_cb_ioim_abort
, ioim
);
1671 bfa_sm_fault(ioim
->bfa
, event
);
1676 * IO is waiting for SG pages.
1679 bfa_ioim_sm_sgalloc(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1681 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1682 bfa_trc(ioim
->bfa
, event
);
1685 case BFA_IOIM_SM_SGALLOCED
:
1686 if (!bfa_ioim_send_ioreq(ioim
)) {
1687 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
1690 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
1693 case BFA_IOIM_SM_CLEANUP
:
1694 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1695 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
1696 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
1698 bfa_ioim_notify_cleanup(ioim
);
1701 case BFA_IOIM_SM_ABORT
:
1702 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1703 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
1704 bfa_ioim_move_to_comp_q(ioim
);
1705 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
1709 case BFA_IOIM_SM_HWFAIL
:
1710 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1711 bfa_sgpg_wcancel(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
);
1712 bfa_ioim_move_to_comp_q(ioim
);
1713 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
1718 bfa_sm_fault(ioim
->bfa
, event
);
1726 bfa_ioim_sm_active(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1728 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1729 bfa_trc_fp(ioim
->bfa
, event
);
1732 case BFA_IOIM_SM_COMP_GOOD
:
1733 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1734 bfa_ioim_move_to_comp_q(ioim
);
1735 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1736 __bfa_cb_ioim_good_comp
, ioim
);
1739 case BFA_IOIM_SM_COMP
:
1740 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1741 bfa_ioim_move_to_comp_q(ioim
);
1742 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
1746 case BFA_IOIM_SM_DONE
:
1747 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
1748 bfa_ioim_move_to_comp_q(ioim
);
1749 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_comp
,
1753 case BFA_IOIM_SM_ABORT
:
1754 ioim
->iosp
->abort_explicit
= BFA_TRUE
;
1755 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
1757 if (bfa_ioim_send_abort(ioim
))
1758 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
1760 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort_qfull
);
1761 bfa_stats(ioim
->itnim
, qwait
);
1762 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
1763 &ioim
->iosp
->reqq_wait
);
1767 case BFA_IOIM_SM_CLEANUP
:
1768 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
1769 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
1771 if (bfa_ioim_send_abort(ioim
))
1772 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
1774 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
1775 bfa_stats(ioim
->itnim
, qwait
);
1776 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
1777 &ioim
->iosp
->reqq_wait
);
1781 case BFA_IOIM_SM_HWFAIL
:
1782 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1783 bfa_ioim_move_to_comp_q(ioim
);
1784 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
1788 case BFA_IOIM_SM_SQRETRY
:
1789 if (bfa_ioim_get_iotag(ioim
) != BFA_TRUE
) {
1790 /* max retry completed free IO */
1791 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
1792 bfa_ioim_move_to_comp_q(ioim
);
1793 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1794 __bfa_cb_ioim_failed
, ioim
);
1797 /* waiting for IO tag resource free */
1798 bfa_sm_set_state(ioim
, bfa_ioim_sm_cmnd_retry
);
1802 bfa_sm_fault(ioim
->bfa
, event
);
1807 * IO is retried with new tag.
1810 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1812 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
1813 bfa_trc_fp(ioim
->bfa
, event
);
1816 case BFA_IOIM_SM_FREE
:
1817 /* abts and rrq done. Now retry the IO with new tag */
1818 if (!bfa_ioim_send_ioreq(ioim
)) {
1819 bfa_sm_set_state(ioim
, bfa_ioim_sm_qfull
);
1822 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
1825 case BFA_IOIM_SM_CLEANUP
:
1826 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
1827 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
1829 if (bfa_ioim_send_abort(ioim
))
1830 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
1832 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
1833 bfa_stats(ioim
->itnim
, qwait
);
1834 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
1835 &ioim
->iosp
->reqq_wait
);
1839 case BFA_IOIM_SM_HWFAIL
:
1840 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1841 bfa_ioim_move_to_comp_q(ioim
);
1842 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
,
1843 __bfa_cb_ioim_failed
, ioim
);
1846 case BFA_IOIM_SM_ABORT
:
1847 /** in this state IO abort is done.
1848 * Waiting for IO tag resource free.
1850 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
1851 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
1856 bfa_sm_fault(ioim
->bfa
, event
);
1861 * IO is being aborted, waiting for completion from firmware.
1864 bfa_ioim_sm_abort(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1866 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1867 bfa_trc(ioim
->bfa
, event
);
1870 case BFA_IOIM_SM_COMP_GOOD
:
1871 case BFA_IOIM_SM_COMP
:
1872 case BFA_IOIM_SM_DONE
:
1873 case BFA_IOIM_SM_FREE
:
1876 case BFA_IOIM_SM_ABORT_DONE
:
1877 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
1878 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
1882 case BFA_IOIM_SM_ABORT_COMP
:
1883 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1884 bfa_ioim_move_to_comp_q(ioim
);
1885 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
1889 case BFA_IOIM_SM_COMP_UTAG
:
1890 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1891 bfa_ioim_move_to_comp_q(ioim
);
1892 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
1896 case BFA_IOIM_SM_CLEANUP
:
1897 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
1898 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
1900 if (bfa_ioim_send_abort(ioim
))
1901 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
1903 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
1904 bfa_stats(ioim
->itnim
, qwait
);
1905 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
1906 &ioim
->iosp
->reqq_wait
);
1910 case BFA_IOIM_SM_HWFAIL
:
1911 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1912 bfa_ioim_move_to_comp_q(ioim
);
1913 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
1918 bfa_sm_fault(ioim
->bfa
, event
);
1923 * IO is being cleaned up (implicit abort), waiting for completion from
1927 bfa_ioim_sm_cleanup(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1929 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1930 bfa_trc(ioim
->bfa
, event
);
1933 case BFA_IOIM_SM_COMP_GOOD
:
1934 case BFA_IOIM_SM_COMP
:
1935 case BFA_IOIM_SM_DONE
:
1936 case BFA_IOIM_SM_FREE
:
1939 case BFA_IOIM_SM_ABORT
:
1941 * IO is already being aborted implicitly
1943 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
1946 case BFA_IOIM_SM_ABORT_DONE
:
1947 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
1948 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
1949 bfa_ioim_notify_cleanup(ioim
);
1952 case BFA_IOIM_SM_ABORT_COMP
:
1953 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1954 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
1955 bfa_ioim_notify_cleanup(ioim
);
1958 case BFA_IOIM_SM_COMP_UTAG
:
1959 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1960 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
1961 bfa_ioim_notify_cleanup(ioim
);
1964 case BFA_IOIM_SM_HWFAIL
:
1965 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
1966 bfa_ioim_move_to_comp_q(ioim
);
1967 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
1971 case BFA_IOIM_SM_CLEANUP
:
1973 * IO can be in cleanup state already due to TM command.
1974 * 2nd cleanup request comes from ITN offline event.
1979 bfa_sm_fault(ioim
->bfa
, event
);
1984 * IO is waiting for room in request CQ
1987 bfa_ioim_sm_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
1989 bfa_trc(ioim
->bfa
, ioim
->iotag
);
1990 bfa_trc(ioim
->bfa
, event
);
1993 case BFA_IOIM_SM_QRESUME
:
1994 bfa_sm_set_state(ioim
, bfa_ioim_sm_active
);
1995 bfa_ioim_send_ioreq(ioim
);
1998 case BFA_IOIM_SM_ABORT
:
1999 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2000 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2001 bfa_ioim_move_to_comp_q(ioim
);
2002 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
2006 case BFA_IOIM_SM_CLEANUP
:
2007 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2008 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2009 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
2011 bfa_ioim_notify_cleanup(ioim
);
2014 case BFA_IOIM_SM_HWFAIL
:
2015 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2016 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2017 bfa_ioim_move_to_comp_q(ioim
);
2018 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
2023 bfa_sm_fault(ioim
->bfa
, event
);
2028 * Active IO is being aborted, waiting for room in request CQ.
2031 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
2033 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2034 bfa_trc(ioim
->bfa
, event
);
2037 case BFA_IOIM_SM_QRESUME
:
2038 bfa_sm_set_state(ioim
, bfa_ioim_sm_abort
);
2039 bfa_ioim_send_abort(ioim
);
2042 case BFA_IOIM_SM_CLEANUP
:
2043 bfa_assert(ioim
->iosp
->abort_explicit
== BFA_TRUE
);
2044 ioim
->iosp
->abort_explicit
= BFA_FALSE
;
2045 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup_qfull
);
2048 case BFA_IOIM_SM_COMP_GOOD
:
2049 case BFA_IOIM_SM_COMP
:
2050 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2051 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2052 bfa_ioim_move_to_comp_q(ioim
);
2053 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
2057 case BFA_IOIM_SM_DONE
:
2058 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
2059 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2060 bfa_ioim_move_to_comp_q(ioim
);
2061 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_abort
,
2065 case BFA_IOIM_SM_HWFAIL
:
2066 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2067 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2068 bfa_ioim_move_to_comp_q(ioim
);
2069 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
2074 bfa_sm_fault(ioim
->bfa
, event
);
2079 * Active IO is being cleaned up, waiting for room in request CQ.
2082 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
2084 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2085 bfa_trc(ioim
->bfa
, event
);
2088 case BFA_IOIM_SM_QRESUME
:
2089 bfa_sm_set_state(ioim
, bfa_ioim_sm_cleanup
);
2090 bfa_ioim_send_abort(ioim
);
2093 case BFA_IOIM_SM_ABORT
:
2095 * IO is already being cleaned up implicitly
2097 ioim
->io_cbfn
= __bfa_cb_ioim_abort
;
2100 case BFA_IOIM_SM_COMP_GOOD
:
2101 case BFA_IOIM_SM_COMP
:
2102 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2103 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2104 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
2105 bfa_ioim_notify_cleanup(ioim
);
2108 case BFA_IOIM_SM_DONE
:
2109 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb_free
);
2110 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2111 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
2112 bfa_ioim_notify_cleanup(ioim
);
2115 case BFA_IOIM_SM_HWFAIL
:
2116 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2117 bfa_reqq_wcancel(&ioim
->iosp
->reqq_wait
);
2118 bfa_ioim_move_to_comp_q(ioim
);
2119 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, __bfa_cb_ioim_failed
,
2124 bfa_sm_fault(ioim
->bfa
, event
);
2129 * IO bfa callback is pending.
2132 bfa_ioim_sm_hcb(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
2134 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
2135 bfa_trc_fp(ioim
->bfa
, event
);
2138 case BFA_IOIM_SM_HCB
:
2139 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
2140 bfa_ioim_free(ioim
);
2143 case BFA_IOIM_SM_CLEANUP
:
2144 bfa_ioim_notify_cleanup(ioim
);
2147 case BFA_IOIM_SM_HWFAIL
:
2151 bfa_sm_fault(ioim
->bfa
, event
);
2156 * IO bfa callback is pending. IO resource cannot be freed.
2159 bfa_ioim_sm_hcb_free(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
2161 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2162 bfa_trc(ioim
->bfa
, event
);
2165 case BFA_IOIM_SM_HCB
:
2166 bfa_sm_set_state(ioim
, bfa_ioim_sm_resfree
);
2167 list_del(&ioim
->qe
);
2168 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_resfree_q
);
2171 case BFA_IOIM_SM_FREE
:
2172 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2175 case BFA_IOIM_SM_CLEANUP
:
2176 bfa_ioim_notify_cleanup(ioim
);
2179 case BFA_IOIM_SM_HWFAIL
:
2180 bfa_sm_set_state(ioim
, bfa_ioim_sm_hcb
);
2184 bfa_sm_fault(ioim
->bfa
, event
);
2189 * IO is completed, waiting resource free from firmware.
2192 bfa_ioim_sm_resfree(struct bfa_ioim_s
*ioim
, enum bfa_ioim_event event
)
2194 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2195 bfa_trc(ioim
->bfa
, event
);
2198 case BFA_IOIM_SM_FREE
:
2199 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
2200 bfa_ioim_free(ioim
);
2203 case BFA_IOIM_SM_CLEANUP
:
2204 bfa_ioim_notify_cleanup(ioim
);
2207 case BFA_IOIM_SM_HWFAIL
:
2211 bfa_sm_fault(ioim
->bfa
, event
);
2222 __bfa_cb_ioim_good_comp(void *cbarg
, bfa_boolean_t complete
)
2224 struct bfa_ioim_s
*ioim
= cbarg
;
2227 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
2231 bfa_cb_ioim_good_comp(ioim
->bfa
->bfad
, ioim
->dio
);
2235 __bfa_cb_ioim_comp(void *cbarg
, bfa_boolean_t complete
)
2237 struct bfa_ioim_s
*ioim
= cbarg
;
2238 struct bfi_ioim_rsp_s
*m
;
2244 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
2248 m
= (struct bfi_ioim_rsp_s
*) &ioim
->iosp
->comp_rspmsg
;
2249 if (m
->io_status
== BFI_IOIM_STS_OK
) {
2251 * setup sense information, if present
2253 if ((m
->scsi_status
== SCSI_STATUS_CHECK_CONDITION
) &&
2255 sns_len
= m
->sns_len
;
2256 snsinfo
= ioim
->iosp
->snsinfo
;
2260 * setup residue value correctly for normal completions
2262 if (m
->resid_flags
== FCP_RESID_UNDER
) {
2263 residue
= bfa_os_ntohl(m
->residue
);
2264 bfa_stats(ioim
->itnim
, iocomp_underrun
);
2266 if (m
->resid_flags
== FCP_RESID_OVER
) {
2267 residue
= bfa_os_ntohl(m
->residue
);
2269 bfa_stats(ioim
->itnim
, iocomp_overrun
);
2273 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, m
->io_status
,
2274 m
->scsi_status
, sns_len
, snsinfo
, residue
);
2278 __bfa_cb_ioim_failed(void *cbarg
, bfa_boolean_t complete
)
2280 struct bfa_ioim_s
*ioim
= cbarg
;
2283 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
2287 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_ABORTED
,
2292 __bfa_cb_ioim_pathtov(void *cbarg
, bfa_boolean_t complete
)
2294 struct bfa_ioim_s
*ioim
= cbarg
;
2296 bfa_stats(ioim
->itnim
, path_tov_expired
);
2298 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
2302 bfa_cb_ioim_done(ioim
->bfa
->bfad
, ioim
->dio
, BFI_IOIM_STS_PATHTOV
,
2307 __bfa_cb_ioim_abort(void *cbarg
, bfa_boolean_t complete
)
2309 struct bfa_ioim_s
*ioim
= cbarg
;
2312 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HCB
);
2316 bfa_cb_ioim_abort(ioim
->bfa
->bfad
, ioim
->dio
);
2320 bfa_ioim_sgpg_alloced(void *cbarg
)
2322 struct bfa_ioim_s
*ioim
= cbarg
;
2324 ioim
->nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
2325 list_splice_tail_init(&ioim
->iosp
->sgpg_wqe
.sgpg_q
, &ioim
->sgpg_q
);
2326 bfa_ioim_sgpg_setup(ioim
);
2327 bfa_sm_send_event(ioim
, BFA_IOIM_SM_SGALLOCED
);
2331 * Send I/O request to firmware.
2333 static bfa_boolean_t
2334 bfa_ioim_send_ioreq(struct bfa_ioim_s
*ioim
)
2336 struct bfa_itnim_s
*itnim
= ioim
->itnim
;
2337 struct bfi_ioim_req_s
*m
;
2338 static struct fcp_cmnd_s cmnd_z0
= { 0 };
2339 struct bfi_sge_s
*sge
;
2343 struct scatterlist
*sg
;
2344 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
2347 * check for room in queue to send request now
2349 m
= bfa_reqq_next(ioim
->bfa
, ioim
->reqq
);
2351 bfa_stats(ioim
->itnim
, qwait
);
2352 bfa_reqq_wait(ioim
->bfa
, ioim
->reqq
,
2353 &ioim
->iosp
->reqq_wait
);
2358 * build i/o request message next
2360 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
2361 m
->rport_hdl
= ioim
->itnim
->rport
->fw_handle
;
2362 m
->io_timeout
= bfa_cb_ioim_get_timeout(ioim
->dio
);
2365 * build inline IO SG element here
2369 sg
= (struct scatterlist
*)scsi_sglist(cmnd
);
2370 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
2371 sge
->sga
= *(union bfi_addr_u
*) &addr
;
2372 pgdlen
= sg_dma_len(sg
);
2373 sge
->sg_len
= pgdlen
;
2374 sge
->flags
= (ioim
->nsges
> BFI_SGE_INLINE
) ?
2375 BFI_SGE_DATA_CPL
: BFI_SGE_DATA_LAST
;
2380 if (ioim
->nsges
> BFI_SGE_INLINE
) {
2381 sge
->sga
= ioim
->sgpg
->sgpg_pa
;
2383 sge
->sga
.a32
.addr_lo
= 0;
2384 sge
->sga
.a32
.addr_hi
= 0;
2386 sge
->sg_len
= pgdlen
;
2387 sge
->flags
= BFI_SGE_PGDLEN
;
2391 * set up I/O command parameters
2393 bfa_os_assign(m
->cmnd
, cmnd_z0
);
2394 m
->cmnd
.lun
= bfa_cb_ioim_get_lun(ioim
->dio
);
2395 m
->cmnd
.iodir
= bfa_cb_ioim_get_iodir(ioim
->dio
);
2396 bfa_os_assign(m
->cmnd
.cdb
,
2397 *(scsi_cdb_t
*)bfa_cb_ioim_get_cdb(ioim
->dio
));
2398 fcp_dl
= bfa_cb_ioim_get_size(ioim
->dio
);
2399 m
->cmnd
.fcp_dl
= bfa_os_htonl(fcp_dl
);
2402 * set up I/O message header
2404 switch (m
->cmnd
.iodir
) {
2405 case FCP_IODIR_READ
:
2406 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_READ
, 0, bfa_lpuid(ioim
->bfa
));
2407 bfa_stats(itnim
, input_reqs
);
2408 ioim
->itnim
->stats
.rd_throughput
+= fcp_dl
;
2410 case FCP_IODIR_WRITE
:
2411 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_WRITE
, 0, bfa_lpuid(ioim
->bfa
));
2412 bfa_stats(itnim
, output_reqs
);
2413 ioim
->itnim
->stats
.wr_throughput
+= fcp_dl
;
2416 bfa_stats(itnim
, input_reqs
);
2417 bfa_stats(itnim
, output_reqs
);
2419 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
2421 if (itnim
->seq_rec
||
2422 (bfa_cb_ioim_get_size(ioim
->dio
) & (sizeof(u32
) - 1)))
2423 bfi_h2i_set(m
->mh
, BFI_MC_IOIM_IO
, 0, bfa_lpuid(ioim
->bfa
));
2425 #ifdef IOIM_ADVANCED
2426 m
->cmnd
.crn
= bfa_cb_ioim_get_crn(ioim
->dio
);
2427 m
->cmnd
.priority
= bfa_cb_ioim_get_priority(ioim
->dio
);
2428 m
->cmnd
.taskattr
= bfa_cb_ioim_get_taskattr(ioim
->dio
);
2431 * Handle large CDB (>16 bytes).
2433 m
->cmnd
.addl_cdb_len
= (bfa_cb_ioim_get_cdblen(ioim
->dio
) -
2434 FCP_CMND_CDB_LEN
) / sizeof(u32
);
2435 if (m
->cmnd
.addl_cdb_len
) {
2436 bfa_os_memcpy(&m
->cmnd
.cdb
+ 1, (scsi_cdb_t
*)
2437 bfa_cb_ioim_get_cdb(ioim
->dio
) + 1,
2438 m
->cmnd
.addl_cdb_len
* sizeof(u32
));
2439 fcp_cmnd_fcpdl(&m
->cmnd
) =
2440 bfa_os_htonl(bfa_cb_ioim_get_size(ioim
->dio
));
2445 * queue I/O message to firmware
2447 bfa_reqq_produce(ioim
->bfa
, ioim
->reqq
);
2452 * Setup any additional SG pages needed.Inline SG element is setup
2455 static bfa_boolean_t
2456 bfa_ioim_sge_setup(struct bfa_ioim_s
*ioim
)
2460 bfa_assert(ioim
->nsges
> BFI_SGE_INLINE
);
2463 * allocate SG pages needed
2465 nsgpgs
= BFA_SGPG_NPAGE(ioim
->nsges
);
2469 if (bfa_sgpg_malloc(ioim
->bfa
, &ioim
->sgpg_q
, nsgpgs
)
2471 bfa_sgpg_wait(ioim
->bfa
, &ioim
->iosp
->sgpg_wqe
, nsgpgs
);
2475 ioim
->nsgpgs
= nsgpgs
;
2476 bfa_ioim_sgpg_setup(ioim
);
2482 bfa_ioim_sgpg_setup(struct bfa_ioim_s
*ioim
)
2484 int sgeid
, nsges
, i
;
2485 struct bfi_sge_s
*sge
;
2486 struct bfa_sgpg_s
*sgpg
;
2489 struct scatterlist
*sg
;
2490 struct scsi_cmnd
*cmnd
= (struct scsi_cmnd
*) ioim
->dio
;
2492 sgeid
= BFI_SGE_INLINE
;
2493 ioim
->sgpg
= sgpg
= bfa_q_first(&ioim
->sgpg_q
);
2495 sg
= scsi_sglist(cmnd
);
2499 sge
= sgpg
->sgpg
->sges
;
2500 nsges
= ioim
->nsges
- sgeid
;
2501 if (nsges
> BFI_SGPG_DATA_SGES
)
2502 nsges
= BFI_SGPG_DATA_SGES
;
2505 for (i
= 0; i
< nsges
; i
++, sge
++, sgeid
++, sg
= sg_next(sg
)) {
2506 addr
= bfa_os_sgaddr(sg_dma_address(sg
));
2507 sge
->sga
= *(union bfi_addr_u
*) &addr
;
2508 sge
->sg_len
= sg_dma_len(sg
);
2509 pgcumsz
+= sge
->sg_len
;
2514 if (i
< (nsges
- 1))
2515 sge
->flags
= BFI_SGE_DATA
;
2516 else if (sgeid
< (ioim
->nsges
- 1))
2517 sge
->flags
= BFI_SGE_DATA_CPL
;
2519 sge
->flags
= BFI_SGE_DATA_LAST
;
2524 sgpg
= (struct bfa_sgpg_s
*) bfa_q_next(sgpg
);
2527 * set the link element of each page
2529 if (sgeid
== ioim
->nsges
) {
2530 sge
->flags
= BFI_SGE_PGDLEN
;
2531 sge
->sga
.a32
.addr_lo
= 0;
2532 sge
->sga
.a32
.addr_hi
= 0;
2534 sge
->flags
= BFI_SGE_LINK
;
2535 sge
->sga
= sgpg
->sgpg_pa
;
2537 sge
->sg_len
= pgcumsz
;
2540 } while (sgeid
< ioim
->nsges
);
2544 * Send I/O abort request to firmware.
2546 static bfa_boolean_t
2547 bfa_ioim_send_abort(struct bfa_ioim_s
*ioim
)
2549 struct bfi_ioim_abort_req_s
*m
;
2550 enum bfi_ioim_h2i msgop
;
2553 * check for room in queue to send request now
2555 m
= bfa_reqq_next(ioim
->bfa
, ioim
->reqq
);
2560 * build i/o request message next
2562 if (ioim
->iosp
->abort_explicit
)
2563 msgop
= BFI_IOIM_H2I_IOABORT_REQ
;
2565 msgop
= BFI_IOIM_H2I_IOCLEANUP_REQ
;
2567 bfi_h2i_set(m
->mh
, BFI_MC_IOIM
, msgop
, bfa_lpuid(ioim
->bfa
));
2568 m
->io_tag
= bfa_os_htons(ioim
->iotag
);
2569 m
->abort_tag
= ++ioim
->abort_tag
;
2572 * queue I/O message to firmware
2574 bfa_reqq_produce(ioim
->bfa
, ioim
->reqq
);
2579 * Call to resume any I/O requests waiting for room in request queue.
2582 bfa_ioim_qresume(void *cbarg
)
2584 struct bfa_ioim_s
*ioim
= cbarg
;
2586 bfa_stats(ioim
->itnim
, qresumes
);
2587 bfa_sm_send_event(ioim
, BFA_IOIM_SM_QRESUME
);
2592 bfa_ioim_notify_cleanup(struct bfa_ioim_s
*ioim
)
2595 * Move IO from itnim queue to fcpim global queue since itnim will be
2598 list_del(&ioim
->qe
);
2599 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
2601 if (!ioim
->iosp
->tskim
) {
2602 if (ioim
->fcpim
->delay_comp
&& ioim
->itnim
->iotov_active
) {
2603 bfa_cb_dequeue(&ioim
->hcb_qe
);
2604 list_del(&ioim
->qe
);
2605 list_add_tail(&ioim
->qe
, &ioim
->itnim
->delay_comp_q
);
2607 bfa_itnim_iodone(ioim
->itnim
);
2609 bfa_tskim_iodone(ioim
->iosp
->tskim
);
2612 static bfa_boolean_t
2613 bfa_ioim_is_abortable(struct bfa_ioim_s
*ioim
)
2615 if ((bfa_sm_cmp_state(ioim
, bfa_ioim_sm_uninit
) &&
2616 (!bfa_q_is_on_q(&ioim
->itnim
->pending_q
, ioim
))) ||
2617 (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_abort
)) ||
2618 (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_abort_qfull
)) ||
2619 (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_hcb
)) ||
2620 (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_hcb_free
)) ||
2621 (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_resfree
)))
2628 * or after the link comes back.
2631 bfa_ioim_delayed_comp(struct bfa_ioim_s
*ioim
, bfa_boolean_t iotov
)
2634 * If path tov timer expired, failback with PATHTOV status - these
2635 * IO requests are not normally retried by IO stack.
2637 * Otherwise device cameback online and fail it with normal failed
2638 * status so that IO stack retries these failed IO requests.
2641 ioim
->io_cbfn
= __bfa_cb_ioim_pathtov
;
2643 ioim
->io_cbfn
= __bfa_cb_ioim_failed
;
2644 bfa_stats(ioim
->itnim
, iocom_nexus_abort
);
2646 bfa_cb_queue(ioim
->bfa
, &ioim
->hcb_qe
, ioim
->io_cbfn
, ioim
);
2649 * Move IO to fcpim global queue since itnim will be
2652 list_del(&ioim
->qe
);
2653 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
2663 * Memory allocation and initialization.
2666 bfa_ioim_attach(struct bfa_fcpim_mod_s
*fcpim
, struct bfa_meminfo_s
*minfo
)
2668 struct bfa_ioim_s
*ioim
;
2669 struct bfa_ioim_sp_s
*iosp
;
2675 * claim memory first
2677 ioim
= (struct bfa_ioim_s
*) bfa_meminfo_kva(minfo
);
2678 fcpim
->ioim_arr
= ioim
;
2679 bfa_meminfo_kva(minfo
) = (u8
*) (ioim
+ fcpim
->num_ioim_reqs
);
2681 iosp
= (struct bfa_ioim_sp_s
*) bfa_meminfo_kva(minfo
);
2682 fcpim
->ioim_sp_arr
= iosp
;
2683 bfa_meminfo_kva(minfo
) = (u8
*) (iosp
+ fcpim
->num_ioim_reqs
);
2686 * Claim DMA memory for per IO sense data.
2688 snsbufsz
= fcpim
->num_ioim_reqs
* BFI_IOIM_SNSLEN
;
2689 fcpim
->snsbase
.pa
= bfa_meminfo_dma_phys(minfo
);
2690 bfa_meminfo_dma_phys(minfo
) += snsbufsz
;
2692 fcpim
->snsbase
.kva
= bfa_meminfo_dma_virt(minfo
);
2693 bfa_meminfo_dma_virt(minfo
) += snsbufsz
;
2694 snsinfo
= fcpim
->snsbase
.kva
;
2695 bfa_iocfc_set_snsbase(fcpim
->bfa
, fcpim
->snsbase
.pa
);
2698 * Initialize ioim free queues
2700 INIT_LIST_HEAD(&fcpim
->ioim_free_q
);
2701 INIT_LIST_HEAD(&fcpim
->ioim_resfree_q
);
2702 INIT_LIST_HEAD(&fcpim
->ioim_comp_q
);
2704 for (i
= 0; i
< fcpim
->num_ioim_reqs
;
2705 i
++, ioim
++, iosp
++, snsinfo
+= BFI_IOIM_SNSLEN
) {
2709 bfa_os_memset(ioim
, 0, sizeof(struct bfa_ioim_s
));
2711 ioim
->bfa
= fcpim
->bfa
;
2712 ioim
->fcpim
= fcpim
;
2714 iosp
->snsinfo
= snsinfo
;
2715 INIT_LIST_HEAD(&ioim
->sgpg_q
);
2716 bfa_reqq_winit(&ioim
->iosp
->reqq_wait
,
2717 bfa_ioim_qresume
, ioim
);
2718 bfa_sgpg_winit(&ioim
->iosp
->sgpg_wqe
,
2719 bfa_ioim_sgpg_alloced
, ioim
);
2720 bfa_sm_set_state(ioim
, bfa_ioim_sm_uninit
);
2722 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
/*
 * Driver detach time call. No per-IOIM teardown is required; all memory
 * was carved from the module meminfo pool and is released with it.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
2735 bfa_ioim_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
2737 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
2738 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
2739 struct bfa_ioim_s
*ioim
;
2741 enum bfa_ioim_event evt
= BFA_IOIM_SM_COMP
;
2743 iotag
= bfa_os_ntohs(rsp
->io_tag
);
2745 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
2746 bfa_assert(ioim
->iotag
== iotag
);
2748 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2749 bfa_trc(ioim
->bfa
, rsp
->io_status
);
2750 bfa_trc(ioim
->bfa
, rsp
->reuse_io_tag
);
2752 if (bfa_sm_cmp_state(ioim
, bfa_ioim_sm_active
))
2753 bfa_os_assign(ioim
->iosp
->comp_rspmsg
, *m
);
2755 switch (rsp
->io_status
) {
2756 case BFI_IOIM_STS_OK
:
2757 bfa_stats(ioim
->itnim
, iocomp_ok
);
2758 if (rsp
->reuse_io_tag
== 0)
2759 evt
= BFA_IOIM_SM_DONE
;
2761 evt
= BFA_IOIM_SM_COMP
;
2764 case BFI_IOIM_STS_TIMEDOUT
:
2765 bfa_stats(ioim
->itnim
, iocomp_timedout
);
2766 case BFI_IOIM_STS_ABORTED
:
2767 rsp
->io_status
= BFI_IOIM_STS_ABORTED
;
2768 bfa_stats(ioim
->itnim
, iocomp_aborted
);
2769 if (rsp
->reuse_io_tag
== 0)
2770 evt
= BFA_IOIM_SM_DONE
;
2772 evt
= BFA_IOIM_SM_COMP
;
2775 case BFI_IOIM_STS_PROTO_ERR
:
2776 bfa_stats(ioim
->itnim
, iocom_proto_err
);
2777 bfa_assert(rsp
->reuse_io_tag
);
2778 evt
= BFA_IOIM_SM_COMP
;
2781 case BFI_IOIM_STS_SQER_NEEDED
:
2782 bfa_stats(ioim
->itnim
, iocom_sqer_needed
);
2783 bfa_assert(rsp
->reuse_io_tag
== 0);
2784 evt
= BFA_IOIM_SM_SQRETRY
;
2787 case BFI_IOIM_STS_RES_FREE
:
2788 bfa_stats(ioim
->itnim
, iocom_res_free
);
2789 evt
= BFA_IOIM_SM_FREE
;
2792 case BFI_IOIM_STS_HOST_ABORTED
:
2793 bfa_stats(ioim
->itnim
, iocom_hostabrts
);
2794 if (rsp
->abort_tag
!= ioim
->abort_tag
) {
2795 bfa_trc(ioim
->bfa
, rsp
->abort_tag
);
2796 bfa_trc(ioim
->bfa
, ioim
->abort_tag
);
2800 if (rsp
->reuse_io_tag
)
2801 evt
= BFA_IOIM_SM_ABORT_COMP
;
2803 evt
= BFA_IOIM_SM_ABORT_DONE
;
2806 case BFI_IOIM_STS_UTAG
:
2807 bfa_stats(ioim
->itnim
, iocom_utags
);
2808 evt
= BFA_IOIM_SM_COMP_UTAG
;
2815 bfa_sm_send_event(ioim
, evt
);
2819 bfa_ioim_good_comp_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
2821 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
2822 struct bfi_ioim_rsp_s
*rsp
= (struct bfi_ioim_rsp_s
*) m
;
2823 struct bfa_ioim_s
*ioim
;
2826 iotag
= bfa_os_ntohs(rsp
->io_tag
);
2828 ioim
= BFA_IOIM_FROM_TAG(fcpim
, iotag
);
2829 bfa_assert(ioim
->iotag
== iotag
);
2831 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
2832 bfa_ioim_cb_profile_comp(fcpim
, ioim
);
2834 bfa_sm_send_event(ioim
, BFA_IOIM_SM_COMP_GOOD
);
2838 bfa_ioim_profile_start(struct bfa_ioim_s
*ioim
)
2840 ioim
->start_time
= bfa_os_get_clock();
2844 bfa_ioim_profile_comp(struct bfa_ioim_s
*ioim
)
2846 u32 fcp_dl
= bfa_cb_ioim_get_size(ioim
->dio
);
2847 u32 index
= bfa_ioim_get_index(fcp_dl
);
2848 u64 end_time
= bfa_os_get_clock();
2849 struct bfa_itnim_latency_s
*io_lat
=
2850 &(ioim
->itnim
->ioprofile
.io_latency
);
2851 u32 val
= (u32
)(end_time
- ioim
->start_time
);
2853 bfa_itnim_ioprofile_update(ioim
->itnim
, index
);
2855 io_lat
->count
[index
]++;
2856 io_lat
->min
[index
] = (io_lat
->min
[index
] < val
) ?
2857 io_lat
->min
[index
] : val
;
2858 io_lat
->max
[index
] = (io_lat
->max
[index
] > val
) ?
2859 io_lat
->max
[index
] : val
;
2860 io_lat
->avg
[index
] += val
;
2863 * Called by itnim to clean up IO while going offline.
2866 bfa_ioim_cleanup(struct bfa_ioim_s
*ioim
)
2868 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2869 bfa_stats(ioim
->itnim
, io_cleanups
);
2871 ioim
->iosp
->tskim
= NULL
;
2872 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
2876 bfa_ioim_cleanup_tm(struct bfa_ioim_s
*ioim
, struct bfa_tskim_s
*tskim
)
2878 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2879 bfa_stats(ioim
->itnim
, io_tmaborts
);
2881 ioim
->iosp
->tskim
= tskim
;
2882 bfa_sm_send_event(ioim
, BFA_IOIM_SM_CLEANUP
);
2886 * IOC failure handling.
2889 bfa_ioim_iocdisable(struct bfa_ioim_s
*ioim
)
2891 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2892 bfa_stats(ioim
->itnim
, io_iocdowns
);
2893 bfa_sm_send_event(ioim
, BFA_IOIM_SM_HWFAIL
);
2897 * IO offline TOV popped. Fail the pending IO.
2900 bfa_ioim_tov(struct bfa_ioim_s
*ioim
)
2902 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2903 bfa_sm_send_event(ioim
, BFA_IOIM_SM_IOTOV
);
2913 * Allocate IOIM resource for initiator mode I/O request.
2916 bfa_ioim_alloc(struct bfa_s
*bfa
, struct bfad_ioim_s
*dio
,
2917 struct bfa_itnim_s
*itnim
, u16 nsges
)
2919 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
2920 struct bfa_ioim_s
*ioim
;
2923 * alocate IOIM resource
2925 bfa_q_deq(&fcpim
->ioim_free_q
, &ioim
);
2927 bfa_stats(itnim
, no_iotags
);
2932 ioim
->itnim
= itnim
;
2933 ioim
->nsges
= nsges
;
2936 bfa_stats(itnim
, total_ios
);
2937 fcpim
->ios_active
++;
2939 list_add_tail(&ioim
->qe
, &itnim
->io_q
);
2940 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
2946 bfa_ioim_free(struct bfa_ioim_s
*ioim
)
2948 struct bfa_fcpim_mod_s
*fcpim
= ioim
->fcpim
;
2950 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
2951 bfa_assert_fp(bfa_sm_cmp_state(ioim
, bfa_ioim_sm_uninit
));
2953 bfa_assert_fp(list_empty(&ioim
->sgpg_q
) ||
2954 (ioim
->nsges
> BFI_SGE_INLINE
));
2956 if (ioim
->nsgpgs
> 0)
2957 bfa_sgpg_mfree(ioim
->bfa
, &ioim
->sgpg_q
, ioim
->nsgpgs
);
2959 bfa_stats(ioim
->itnim
, io_comps
);
2960 fcpim
->ios_active
--;
2962 list_del(&ioim
->qe
);
2963 list_add_tail(&ioim
->qe
, &fcpim
->ioim_free_q
);
2967 bfa_ioim_start(struct bfa_ioim_s
*ioim
)
2969 bfa_trc_fp(ioim
->bfa
, ioim
->iotag
);
2971 bfa_ioim_cb_profile_start(ioim
->fcpim
, ioim
);
2974 * Obtain the queue over which this request has to be issued
2976 ioim
->reqq
= bfa_fcpim_ioredirect_enabled(ioim
->bfa
) ?
2977 bfa_cb_ioim_get_reqq(ioim
->dio
) :
2978 bfa_itnim_get_reqq(ioim
);
2980 bfa_sm_send_event(ioim
, BFA_IOIM_SM_START
);
2984 * Driver I/O abort request.
2987 bfa_ioim_abort(struct bfa_ioim_s
*ioim
)
2990 bfa_trc(ioim
->bfa
, ioim
->iotag
);
2992 if (!bfa_ioim_is_abortable(ioim
))
2993 return BFA_STATUS_FAILED
;
2995 bfa_stats(ioim
->itnim
, io_aborts
);
2996 bfa_sm_send_event(ioim
, BFA_IOIM_SM_ABORT
);
2998 return BFA_STATUS_OK
;
3003 * BFA TSKIM state machine functions
3007 * Task management command beginning state.
3010 bfa_tskim_sm_uninit(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3012 bfa_trc(tskim
->bfa
, event
);
3015 case BFA_TSKIM_SM_START
:
3016 bfa_sm_set_state(tskim
, bfa_tskim_sm_active
);
3017 bfa_tskim_gather_ios(tskim
);
3020 * If device is offline, do not send TM on wire. Just cleanup
3021 * any pending IO requests and complete TM request.
3023 if (!bfa_itnim_is_online(tskim
->itnim
)) {
3024 bfa_sm_set_state(tskim
, bfa_tskim_sm_iocleanup
);
3025 tskim
->tsk_status
= BFI_TSKIM_STS_OK
;
3026 bfa_tskim_cleanup_ios(tskim
);
3030 if (!bfa_tskim_send(tskim
)) {
3031 bfa_sm_set_state(tskim
, bfa_tskim_sm_qfull
);
3032 bfa_stats(tskim
->itnim
, tm_qwait
);
3033 bfa_reqq_wait(tskim
->bfa
, tskim
->itnim
->reqq
,
3039 bfa_sm_fault(tskim
->bfa
, event
);
3045 * TM command is active, awaiting completion from firmware to
3046 * cleanup IO requests in TM scope.
3049 bfa_tskim_sm_active(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3051 bfa_trc(tskim
->bfa
, event
);
3054 case BFA_TSKIM_SM_DONE
:
3055 bfa_sm_set_state(tskim
, bfa_tskim_sm_iocleanup
);
3056 bfa_tskim_cleanup_ios(tskim
);
3059 case BFA_TSKIM_SM_CLEANUP
:
3060 bfa_sm_set_state(tskim
, bfa_tskim_sm_cleanup
);
3061 if (!bfa_tskim_send_abort(tskim
)) {
3062 bfa_sm_set_state(tskim
, bfa_tskim_sm_cleanup_qfull
);
3063 bfa_stats(tskim
->itnim
, tm_qwait
);
3064 bfa_reqq_wait(tskim
->bfa
, tskim
->itnim
->reqq
,
3069 case BFA_TSKIM_SM_HWFAIL
:
3070 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3071 bfa_tskim_iocdisable_ios(tskim
);
3072 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_failed
);
3076 bfa_sm_fault(tskim
->bfa
, event
);
3081 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3082 * completion event from firmware.
3085 bfa_tskim_sm_cleanup(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3087 bfa_trc(tskim
->bfa
, event
);
3090 case BFA_TSKIM_SM_DONE
:
3092 * Ignore and wait for ABORT completion from firmware.
3096 case BFA_TSKIM_SM_CLEANUP_DONE
:
3097 bfa_sm_set_state(tskim
, bfa_tskim_sm_iocleanup
);
3098 bfa_tskim_cleanup_ios(tskim
);
3101 case BFA_TSKIM_SM_HWFAIL
:
3102 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3103 bfa_tskim_iocdisable_ios(tskim
);
3104 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_failed
);
3108 bfa_sm_fault(tskim
->bfa
, event
);
3113 bfa_tskim_sm_iocleanup(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3115 bfa_trc(tskim
->bfa
, event
);
3118 case BFA_TSKIM_SM_IOS_DONE
:
3119 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3120 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_done
);
3123 case BFA_TSKIM_SM_CLEANUP
:
3125 * Ignore, TM command completed on wire.
3126 * Notify TM conmpletion on IO cleanup completion.
3130 case BFA_TSKIM_SM_HWFAIL
:
3131 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3132 bfa_tskim_iocdisable_ios(tskim
);
3133 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_failed
);
3137 bfa_sm_fault(tskim
->bfa
, event
);
3142 * Task management command is waiting for room in request CQ
3145 bfa_tskim_sm_qfull(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3147 bfa_trc(tskim
->bfa
, event
);
3150 case BFA_TSKIM_SM_QRESUME
:
3151 bfa_sm_set_state(tskim
, bfa_tskim_sm_active
);
3152 bfa_tskim_send(tskim
);
3155 case BFA_TSKIM_SM_CLEANUP
:
3157 * No need to send TM on wire since ITN is offline.
3159 bfa_sm_set_state(tskim
, bfa_tskim_sm_iocleanup
);
3160 bfa_reqq_wcancel(&tskim
->reqq_wait
);
3161 bfa_tskim_cleanup_ios(tskim
);
3164 case BFA_TSKIM_SM_HWFAIL
:
3165 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3166 bfa_reqq_wcancel(&tskim
->reqq_wait
);
3167 bfa_tskim_iocdisable_ios(tskim
);
3168 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_failed
);
3172 bfa_sm_fault(tskim
->bfa
, event
);
3177 * Task management command is active, awaiting for room in request CQ
3178 * to send clean up request.
3181 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s
*tskim
,
3182 enum bfa_tskim_event event
)
3184 bfa_trc(tskim
->bfa
, event
);
3187 case BFA_TSKIM_SM_DONE
:
3188 bfa_reqq_wcancel(&tskim
->reqq_wait
);
3194 case BFA_TSKIM_SM_QRESUME
:
3195 bfa_sm_set_state(tskim
, bfa_tskim_sm_cleanup
);
3196 bfa_tskim_send_abort(tskim
);
3199 case BFA_TSKIM_SM_HWFAIL
:
3200 bfa_sm_set_state(tskim
, bfa_tskim_sm_hcb
);
3201 bfa_reqq_wcancel(&tskim
->reqq_wait
);
3202 bfa_tskim_iocdisable_ios(tskim
);
3203 bfa_tskim_qcomp(tskim
, __bfa_cb_tskim_failed
);
3207 bfa_sm_fault(tskim
->bfa
, event
);
3212 * BFA callback is pending
3215 bfa_tskim_sm_hcb(struct bfa_tskim_s
*tskim
, enum bfa_tskim_event event
)
3217 bfa_trc(tskim
->bfa
, event
);
3220 case BFA_TSKIM_SM_HCB
:
3221 bfa_sm_set_state(tskim
, bfa_tskim_sm_uninit
);
3222 bfa_tskim_free(tskim
);
3225 case BFA_TSKIM_SM_CLEANUP
:
3226 bfa_tskim_notify_comp(tskim
);
3229 case BFA_TSKIM_SM_HWFAIL
:
3233 bfa_sm_fault(tskim
->bfa
, event
);
3244 __bfa_cb_tskim_done(void *cbarg
, bfa_boolean_t complete
)
3246 struct bfa_tskim_s
*tskim
= cbarg
;
3249 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_HCB
);
3253 bfa_stats(tskim
->itnim
, tm_success
);
3254 bfa_cb_tskim_done(tskim
->bfa
->bfad
, tskim
->dtsk
, tskim
->tsk_status
);
3258 __bfa_cb_tskim_failed(void *cbarg
, bfa_boolean_t complete
)
3260 struct bfa_tskim_s
*tskim
= cbarg
;
3263 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_HCB
);
3267 bfa_stats(tskim
->itnim
, tm_failures
);
3268 bfa_cb_tskim_done(tskim
->bfa
->bfad
, tskim
->dtsk
,
3269 BFI_TSKIM_STS_FAILED
);
3272 static bfa_boolean_t
3273 bfa_tskim_match_scope(struct bfa_tskim_s
*tskim
, lun_t lun
)
3275 switch (tskim
->tm_cmnd
) {
3276 case FCP_TM_TARGET_RESET
:
3279 case FCP_TM_ABORT_TASK_SET
:
3280 case FCP_TM_CLEAR_TASK_SET
:
3281 case FCP_TM_LUN_RESET
:
3282 case FCP_TM_CLEAR_ACA
:
3283 return (tskim
->lun
== lun
);
3293 * Gather affected IO requests and task management commands.
3296 bfa_tskim_gather_ios(struct bfa_tskim_s
*tskim
)
3298 struct bfa_itnim_s
*itnim
= tskim
->itnim
;
3299 struct bfa_ioim_s
*ioim
;
3300 struct list_head
*qe
, *qen
;
3302 INIT_LIST_HEAD(&tskim
->io_q
);
3305 * Gather any active IO requests first.
3307 list_for_each_safe(qe
, qen
, &itnim
->io_q
) {
3308 ioim
= (struct bfa_ioim_s
*) qe
;
3309 if (bfa_tskim_match_scope
3310 (tskim
, bfa_cb_ioim_get_lun(ioim
->dio
))) {
3311 list_del(&ioim
->qe
);
3312 list_add_tail(&ioim
->qe
, &tskim
->io_q
);
3317 * Failback any pending IO requests immediately.
3319 list_for_each_safe(qe
, qen
, &itnim
->pending_q
) {
3320 ioim
= (struct bfa_ioim_s
*) qe
;
3321 if (bfa_tskim_match_scope
3322 (tskim
, bfa_cb_ioim_get_lun(ioim
->dio
))) {
3323 list_del(&ioim
->qe
);
3324 list_add_tail(&ioim
->qe
, &ioim
->fcpim
->ioim_comp_q
);
3331 * IO cleanup completion
3334 bfa_tskim_cleanp_comp(void *tskim_cbarg
)
3336 struct bfa_tskim_s
*tskim
= tskim_cbarg
;
3338 bfa_stats(tskim
->itnim
, tm_io_comps
);
3339 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_IOS_DONE
);
3343 * Gather affected IO requests and task management commands.
3346 bfa_tskim_cleanup_ios(struct bfa_tskim_s
*tskim
)
3348 struct bfa_ioim_s
*ioim
;
3349 struct list_head
*qe
, *qen
;
3351 bfa_wc_init(&tskim
->wc
, bfa_tskim_cleanp_comp
, tskim
);
3353 list_for_each_safe(qe
, qen
, &tskim
->io_q
) {
3354 ioim
= (struct bfa_ioim_s
*) qe
;
3355 bfa_wc_up(&tskim
->wc
);
3356 bfa_ioim_cleanup_tm(ioim
, tskim
);
3359 bfa_wc_wait(&tskim
->wc
);
3363 * Send task management request to firmware.
3365 static bfa_boolean_t
3366 bfa_tskim_send(struct bfa_tskim_s
*tskim
)
3368 struct bfa_itnim_s
*itnim
= tskim
->itnim
;
3369 struct bfi_tskim_req_s
*m
;
3372 * check for room in queue to send request now
3374 m
= bfa_reqq_next(tskim
->bfa
, itnim
->reqq
);
3379 * build i/o request message next
3381 bfi_h2i_set(m
->mh
, BFI_MC_TSKIM
, BFI_TSKIM_H2I_TM_REQ
,
3382 bfa_lpuid(tskim
->bfa
));
3384 m
->tsk_tag
= bfa_os_htons(tskim
->tsk_tag
);
3385 m
->itn_fhdl
= tskim
->itnim
->rport
->fw_handle
;
3386 m
->t_secs
= tskim
->tsecs
;
3387 m
->lun
= tskim
->lun
;
3388 m
->tm_flags
= tskim
->tm_cmnd
;
3391 * queue I/O message to firmware
3393 bfa_reqq_produce(tskim
->bfa
, itnim
->reqq
);
3398 * Send abort request to cleanup an active TM to firmware.
3400 static bfa_boolean_t
3401 bfa_tskim_send_abort(struct bfa_tskim_s
*tskim
)
3403 struct bfa_itnim_s
*itnim
= tskim
->itnim
;
3404 struct bfi_tskim_abortreq_s
*m
;
3407 * check for room in queue to send request now
3409 m
= bfa_reqq_next(tskim
->bfa
, itnim
->reqq
);
3414 * build i/o request message next
3416 bfi_h2i_set(m
->mh
, BFI_MC_TSKIM
, BFI_TSKIM_H2I_ABORT_REQ
,
3417 bfa_lpuid(tskim
->bfa
));
3419 m
->tsk_tag
= bfa_os_htons(tskim
->tsk_tag
);
3422 * queue I/O message to firmware
3424 bfa_reqq_produce(tskim
->bfa
, itnim
->reqq
);
3429 * Call to resume task management cmnd waiting for room in request queue.
3432 bfa_tskim_qresume(void *cbarg
)
3434 struct bfa_tskim_s
*tskim
= cbarg
;
3436 bfa_stats(tskim
->itnim
, tm_qresumes
);
3437 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_QRESUME
);
3441 * Cleanup IOs associated with a task mangement command on IOC failures.
3444 bfa_tskim_iocdisable_ios(struct bfa_tskim_s
*tskim
)
3446 struct bfa_ioim_s
*ioim
;
3447 struct list_head
*qe
, *qen
;
3449 list_for_each_safe(qe
, qen
, &tskim
->io_q
) {
3450 ioim
= (struct bfa_ioim_s
*) qe
;
3451 bfa_ioim_iocdisable(ioim
);
3462 * Notification on completions from related ioim.
3465 bfa_tskim_iodone(struct bfa_tskim_s
*tskim
)
3467 bfa_wc_down(&tskim
->wc
);
3471 * Handle IOC h/w failure notification from itnim.
3474 bfa_tskim_iocdisable(struct bfa_tskim_s
*tskim
)
3476 tskim
->notify
= BFA_FALSE
;
3477 bfa_stats(tskim
->itnim
, tm_iocdowns
);
3478 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_HWFAIL
);
3482 * Cleanup TM command and associated IOs as part of ITNIM offline.
3485 bfa_tskim_cleanup(struct bfa_tskim_s
*tskim
)
3487 tskim
->notify
= BFA_TRUE
;
3488 bfa_stats(tskim
->itnim
, tm_cleanups
);
3489 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_CLEANUP
);
3493 * Memory allocation and initialization.
3496 bfa_tskim_attach(struct bfa_fcpim_mod_s
*fcpim
, struct bfa_meminfo_s
*minfo
)
3498 struct bfa_tskim_s
*tskim
;
3501 INIT_LIST_HEAD(&fcpim
->tskim_free_q
);
3503 tskim
= (struct bfa_tskim_s
*) bfa_meminfo_kva(minfo
);
3504 fcpim
->tskim_arr
= tskim
;
3506 for (i
= 0; i
< fcpim
->num_tskim_reqs
; i
++, tskim
++) {
3510 bfa_os_memset(tskim
, 0, sizeof(struct bfa_tskim_s
));
3512 tskim
->bfa
= fcpim
->bfa
;
3513 tskim
->fcpim
= fcpim
;
3514 tskim
->notify
= BFA_FALSE
;
3515 bfa_reqq_winit(&tskim
->reqq_wait
, bfa_tskim_qresume
,
3517 bfa_sm_set_state(tskim
, bfa_tskim_sm_uninit
);
3519 list_add_tail(&tskim
->qe
, &fcpim
->tskim_free_q
);
3522 bfa_meminfo_kva(minfo
) = (u8
*) tskim
;
/*
 * Driver detach time call. No per-TSKIM teardown is required; memory
 * was carved from the module meminfo pool and is released with it.
 */
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
3534 bfa_tskim_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
3536 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
3537 struct bfi_tskim_rsp_s
*rsp
= (struct bfi_tskim_rsp_s
*) m
;
3538 struct bfa_tskim_s
*tskim
;
3539 u16 tsk_tag
= bfa_os_ntohs(rsp
->tsk_tag
);
3541 tskim
= BFA_TSKIM_FROM_TAG(fcpim
, tsk_tag
);
3542 bfa_assert(tskim
->tsk_tag
== tsk_tag
);
3544 tskim
->tsk_status
= rsp
->tsk_status
;
3547 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3548 * requests. All other statuses are for normal completions.
3550 if (rsp
->tsk_status
== BFI_TSKIM_STS_ABORTED
) {
3551 bfa_stats(tskim
->itnim
, tm_cleanup_comps
);
3552 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_CLEANUP_DONE
);
3554 bfa_stats(tskim
->itnim
, tm_fw_rsps
);
3555 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_DONE
);
3566 struct bfa_tskim_s
*
3567 bfa_tskim_alloc(struct bfa_s
*bfa
, struct bfad_tskim_s
*dtsk
)
3569 struct bfa_fcpim_mod_s
*fcpim
= BFA_FCPIM_MOD(bfa
);
3570 struct bfa_tskim_s
*tskim
;
3572 bfa_q_deq(&fcpim
->tskim_free_q
, &tskim
);
3581 bfa_tskim_free(struct bfa_tskim_s
*tskim
)
3583 bfa_assert(bfa_q_is_on_q_func(&tskim
->itnim
->tsk_q
, &tskim
->qe
));
3584 list_del(&tskim
->qe
);
3585 list_add_tail(&tskim
->qe
, &tskim
->fcpim
->tskim_free_q
);
3589 * Start a task management command.
3591 * @param[in] tskim BFA task management command instance
3592 * @param[in] itnim i-t nexus for the task management command
3593 * @param[in] lun lun, if applicable
3594 * @param[in] tm_cmnd Task management command code.
3595 * @param[in] t_secs Timeout in seconds
3600 bfa_tskim_start(struct bfa_tskim_s
*tskim
, struct bfa_itnim_s
*itnim
, lun_t lun
,
3601 enum fcp_tm_cmnd tm_cmnd
, u8 tsecs
)
3603 tskim
->itnim
= itnim
;
3605 tskim
->tm_cmnd
= tm_cmnd
;
3606 tskim
->tsecs
= tsecs
;
3607 tskim
->notify
= BFA_FALSE
;
3608 bfa_stats(itnim
, tm_cmnds
);
3610 list_add_tail(&tskim
->qe
, &itnim
->tsk_q
);
3611 bfa_sm_send_event(tskim
, BFA_TSKIM_SM_START
);