]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/bfa/bfa_fcpim.c
[SCSI] bfa: remove all SCSI IO callbacks
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / bfa / bfa_fcpim.c
CommitLineData
7725ccfd 1/*
a36c61f9 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
7725ccfd
JH
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
a36c61f9 18#include "bfa_modules.h"
f314878a 19#include "bfa_os_inc.h"
7725ccfd
JH
20
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
a36c61f9
KG
24
25#define bfa_fcpim_add_iostats(__l, __r, __stats) \
26 (__l->__stats += __r->__stats)
27
28
5fbe25c7 29/*
a36c61f9
KG
30 * BFA ITNIM Related definitions
31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
33
34#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
35 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
36
37#define bfa_fcpim_additn(__itnim) \
38 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
39#define bfa_fcpim_delitn(__itnim) do { \
40 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
41 bfa_itnim_update_del_itn_stats(__itnim); \
42 list_del(&(__itnim)->qe); \
43 bfa_assert(list_empty(&(__itnim)->io_q)); \
44 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
45 bfa_assert(list_empty(&(__itnim)->pending_q)); \
46} while (0)
47
48#define bfa_itnim_online_cb(__itnim) do { \
49 if ((__itnim)->bfa->fcs) \
50 bfa_cb_itnim_online((__itnim)->ditn); \
51 else { \
52 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
53 __bfa_cb_itnim_online, (__itnim)); \
54 } \
55} while (0)
56
57#define bfa_itnim_offline_cb(__itnim) do { \
58 if ((__itnim)->bfa->fcs) \
59 bfa_cb_itnim_offline((__itnim)->ditn); \
60 else { \
61 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
62 __bfa_cb_itnim_offline, (__itnim)); \
63 } \
64} while (0)
65
66#define bfa_itnim_sler_cb(__itnim) do { \
67 if ((__itnim)->bfa->fcs) \
68 bfa_cb_itnim_sler((__itnim)->ditn); \
69 else { \
70 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
71 __bfa_cb_itnim_sler, (__itnim)); \
72 } \
73} while (0)
74
5fbe25c7 75/*
a36c61f9
KG
76 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
80enum bfa_itnim_event {
81 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
82 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
83 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
84 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
85 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
86 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
87 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
88 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90};
91
5fbe25c7 92/*
a36c61f9
KG
93 * BFA IOIM related definitions
94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \
96 list_del(&(__ioim)->qe); \
97 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
98} while (0)
99
100
101#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
102 if ((__fcpim)->profile_comp) \
103 (__fcpim)->profile_comp(__ioim); \
104} while (0)
105
106#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \
109} while (0)
a36c61f9 110
5fbe25c7 111/*
a36c61f9
KG
112 * IO state machine events
113 */
114enum bfa_ioim_event {
115 BFA_IOIM_SM_START = 1, /* io start request from host */
116 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
117 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
118 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
119 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
120 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
121 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
122 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
123 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
124 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
125 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
126 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
127 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
128 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
129 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
130 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
131 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
132 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
133};
134
135
5fbe25c7 136/*
a36c61f9
KG
137 * BFA TSKIM related definitions
138 */
139
5fbe25c7 140/*
a36c61f9
KG
141 * task management completion handling
142 */
143#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
144 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
145 bfa_tskim_notify_comp(__tskim); \
146} while (0)
147
148#define bfa_tskim_notify_comp(__tskim) do { \
149 if ((__tskim)->notify) \
150 bfa_itnim_tskdone((__tskim)->itnim); \
151} while (0)
152
153
154enum bfa_tskim_event {
155 BFA_TSKIM_SM_START = 1, /* TM command start */
156 BFA_TSKIM_SM_DONE = 2, /* TM completion */
157 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
158 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
159 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
160 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
161 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
162 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
163};
164
5fbe25c7 165/*
a36c61f9
KG
166 * forward declaration for BFA ITNIM functions
167 */
168static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
169static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
170static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
171static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
172static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
173static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
174static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
175static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
176static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
177static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
178static void bfa_itnim_iotov(void *itnim_arg);
179static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
182
5fbe25c7 183/*
a36c61f9
KG
184 * forward declaration of ITNIM state machine
185 */
186static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
209 enum bfa_itnim_event event);
210static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
211 enum bfa_itnim_event event);
212static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
213 enum bfa_itnim_event event);
214static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
215 enum bfa_itnim_event event);
216
5fbe25c7 217/*
a36c61f9
KG
218 * forward declaration for BFA IOIM functions
219 */
220static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
e3e7d3ee 221static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
a36c61f9
KG
222static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
223static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
224static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
225static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
226static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
227static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
228static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
229static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
230
231
5fbe25c7 232/*
a36c61f9
KG
233 * forward declaration of BFA IO state machine
234 */
235static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
236 enum bfa_ioim_event event);
237static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
238 enum bfa_ioim_event event);
239static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259
5fbe25c7 260/*
a36c61f9
KG
261 * forward declaration for BFA TSKIM functions
262 */
263static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
264static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
265static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
f314878a 266 struct scsi_lun lun);
a36c61f9
KG
267static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
268static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
269static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
270static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
271static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
272static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
273
274
5fbe25c7 275/*
a36c61f9
KG
276 * forward declaration of BFA TSKIM state machine
277 */
278static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
279 enum bfa_tskim_event event);
280static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
281 enum bfa_tskim_event event);
282static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292
5fbe25c7 293/*
df0f1933 294 * BFA FCP Initiator Mode module
7725ccfd
JH
295 */
296
5fbe25c7 297/*
a36c61f9 298 * Compute and return memory needed by FCP(im) module.
7725ccfd
JH
299 */
300static void
301bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
302 u32 *dm_len)
303{
304 bfa_itnim_meminfo(cfg, km_len, dm_len);
305
5fbe25c7 306 /*
7725ccfd
JH
307 * IO memory
308 */
309 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
310 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
311 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
312 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
313
314 *km_len += cfg->fwcfg.num_ioim_reqs *
315 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
316
317 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
318
5fbe25c7 319 /*
7725ccfd
JH
320 * task management command memory
321 */
322 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
323 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
324 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
325}
326
327
328static void
329bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
a36c61f9 330 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
7725ccfd
JH
331{
332 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
333
334 bfa_trc(bfa, cfg->drvcfg.path_tov);
335 bfa_trc(bfa, cfg->fwcfg.num_rports);
336 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
337 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
338
a36c61f9
KG
339 fcpim->bfa = bfa;
340 fcpim->num_itnims = cfg->fwcfg.num_rports;
7725ccfd
JH
341 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
342 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
a36c61f9
KG
343 fcpim->path_tov = cfg->drvcfg.path_tov;
344 fcpim->delay_comp = cfg->drvcfg.delay_comp;
345 fcpim->profile_comp = NULL;
346 fcpim->profile_start = NULL;
7725ccfd
JH
347
348 bfa_itnim_attach(fcpim, meminfo);
349 bfa_tskim_attach(fcpim, meminfo);
350 bfa_ioim_attach(fcpim, meminfo);
351}
352
7725ccfd
JH
/*
 * Module detach entry point.  Intentionally empty: the FCP(im)
 * module performs no detach-time work here.
 */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}
357
/*
 * Module start entry point.  Intentionally empty: no start-time
 * work is required for FCP(im).
 */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
362
/*
 * Module stop entry point.  Intentionally empty: no stop-time
 * work is required for FCP(im).
 */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
367
368static void
369bfa_fcpim_iocdisable(struct bfa_s *bfa)
370{
371 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
372 struct bfa_itnim_s *itnim;
a36c61f9 373 struct list_head *qe, *qen;
7725ccfd
JH
374
375 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
376 itnim = (struct bfa_itnim_s *) qe;
377 bfa_itnim_iocdisable(itnim);
378 }
379}
380
a36c61f9
KG
/*
 * Accumulate the IO statistics of @rstats into @lstats, counter by
 * counter (each bfa_fcpim_add_iostats() expands to "lhs += rhs").
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
430
7725ccfd
JH
431void
432bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
433{
434 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
435
436 fcpim->path_tov = path_tov * 1000;
437 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
438 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
439}
440
441u16
442bfa_fcpim_path_tov_get(struct bfa_s *bfa)
443{
444 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
445
f8ceafde 446 return fcpim->path_tov / 1000;
7725ccfd
JH
447}
448
449bfa_status_t
a36c61f9
KG
450bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
451 u8 lp_tag)
452{
453 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
454 struct list_head *qe, *qen;
455 struct bfa_itnim_s *itnim;
456
457 /* accumulate IO stats from itnim */
6a18b167 458 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
459 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
460 itnim = (struct bfa_itnim_s *) qe;
461 if (itnim->rport->rport_info.lp_tag != lp_tag)
462 continue;
463 bfa_fcpim_add_stats(stats, &(itnim->stats));
464 }
465 return BFA_STATUS_OK;
466}
467bfa_status_t
468bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
469{
470 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
473
474 /* accumulate IO stats from itnim */
6a18b167 475 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
476 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
477 itnim = (struct bfa_itnim_s *) qe;
478 bfa_fcpim_add_stats(modstats, &(itnim->stats));
479 }
480 return BFA_STATUS_OK;
481}
482
/*
 * Copy out the statistics accumulated from deleted itnims
 * (struct copy into caller-supplied @modstats).
 */
bfa_status_t
bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
	 struct bfa_fcpim_del_itn_stats_s *modstats)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	*modstats = fcpim->del_itn_stats;

	return BFA_STATUS_OK;
}
493
494
495bfa_status_t
496bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
497{
498 struct bfa_itnim_s *itnim;
499 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
500 struct list_head *qe, *qen;
501
502 /* accumulate IO stats from itnim */
503 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
504 itnim = (struct bfa_itnim_s *) qe;
505 bfa_itnim_clear_stats(itnim);
506 }
507 fcpim->io_profile = BFA_TRUE;
508 fcpim->io_profile_start_time = time;
509 fcpim->profile_comp = bfa_ioim_profile_comp;
510 fcpim->profile_start = bfa_ioim_profile_start;
511
512 return BFA_STATUS_OK;
513}
514bfa_status_t
515bfa_fcpim_profile_off(struct bfa_s *bfa)
7725ccfd
JH
516{
517 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
518 fcpim->io_profile = BFA_FALSE;
519 fcpim->io_profile_start_time = 0;
520 fcpim->profile_comp = NULL;
521 fcpim->profile_start = NULL;
522 return BFA_STATUS_OK;
523}
7725ccfd 524
a36c61f9
KG
525bfa_status_t
526bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
527{
528 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
529 struct list_head *qe, *qen;
530 struct bfa_itnim_s *itnim;
7725ccfd 531
a36c61f9
KG
532 /* clear IO stats from all active itnims */
533 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
534 itnim = (struct bfa_itnim_s *) qe;
535 if (itnim->rport->rport_info.lp_tag != lp_tag)
536 continue;
537 bfa_itnim_clear_stats(itnim);
538 }
7725ccfd 539 return BFA_STATUS_OK;
a36c61f9 540
7725ccfd
JH
541}
542
543bfa_status_t
544bfa_fcpim_clr_modstats(struct bfa_s *bfa)
545{
546 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
547 struct list_head *qe, *qen;
548 struct bfa_itnim_s *itnim;
7725ccfd 549
a36c61f9
KG
550 /* clear IO stats from all active itnims */
551 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
552 itnim = (struct bfa_itnim_s *) qe;
553 bfa_itnim_clear_stats(itnim);
554 }
6a18b167 555 memset(&fcpim->del_itn_stats, 0,
a36c61f9 556 sizeof(struct bfa_fcpim_del_itn_stats_s));
7725ccfd
JH
557
558 return BFA_STATUS_OK;
559}
560
561void
562bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
563{
564 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
565
566 bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
567
568 fcpim->q_depth = q_depth;
569}
570
571u16
572bfa_fcpim_qdepth_get(struct bfa_s *bfa)
573{
574 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
575
f8ceafde 576 return fcpim->q_depth;
7725ccfd
JH
577}
578
36d345a7
JH
void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
	bfa_boolean_t ioredirect;

	/*
	 * IO redirection is turned off when QoS is enabled and vice versa
	 */
	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
	/*
	 * NOTE(review): the computed value is never stored or used in this
	 * function - apparently dead code.  Confirm whether it should be
	 * written somewhere (cf. bfa_fcpim_set_ioredirect) or removed.
	 */
}
589
590void
591bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
592{
593 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
594 fcpim->ioredirect = state;
595}
a36c61f9
KG
596
597
598
5fbe25c7 599/*
a36c61f9
KG
600 * BFA ITNIM module state machine functions
601 */
602
5fbe25c7 603/*
a36c61f9
KG
604 * Beginning/unallocated state - no events expected.
605 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		/* uninit -> created: mark offline, add to module's itnim_q */
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
623
5fbe25c7 624/*
a36c61f9
KG
625 * Beginning state, only online event expected.
626 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/* send f/w create; wait for queue space if request queue full */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* never went to f/w: unlink immediately */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
654
5fbe25c7 655/*
a36c61f9
KG
656 * Waiting for itnim create response from firmware.
657 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* f/w create done: go online and notify upper layers */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* must wait for the outstanding f/w response first */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* undo the create in f/w */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
691
/*
 * Waiting for request-queue space to send the itnim create to firmware.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space available: retry the f/w create */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* nothing reached f/w yet: cancel the wait and unlink */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
726
5fbe25c7 727/*
a36c61f9
KG
728 * Waiting for itnim create response from firmware, a delete is pending.
729 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* create response arrived: now issue the pending delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* f/w is gone: unlink without a f/w delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
754
5fbe25c7 755/*
a36c61f9
KG
756 * Online state - normal parking state.
757 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* start IO-TOV timer and clean up active IOs first */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* delete: clean up IOs, no IO-TOV (IOs won't be retried) */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* second level error recovery requested by f/w */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
796
5fbe25c7 797/*
a36c61f9
KG
798 * Second level error recovery need.
799 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* delete during recovery: clean up and stop the IO-TOV timer */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
827
5fbe25c7 828/*
a36c61f9
KG
829 * Going offline. Waiting for active IO cleanup.
830 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* all IOs cleaned: remove the itnim from firmware */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* recovery event while already cleaning up: ignore */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
864
5fbe25c7 865/*
a36c61f9
KG
866 * Deleting itnim. Waiting for active IO cleanup.
867 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* all IOs cleaned: delete the itnim in firmware */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
892
5fbe25c7 893/*
a36c61f9
KG
894 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
895 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* f/w delete complete: itnim is now offline */
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* same f/w delete also satisfies the host delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
921
/*
 * Waiting for request-queue space to send the itnim delete to firmware.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space available: retry the f/w delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
949
5fbe25c7 950/*
a36c61f9
KG
951 * Offline state.
952 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		/* stop IO-TOV timer and unlink from the module queue */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* rport came back: re-create in firmware */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
981
5fbe25c7 982/*
a36c61f9
KG
983 * IOC h/w failed state.
984 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* no f/w interaction possible: just notify offline */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* IOC back up: attempt the f/w create again */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* already in IOC-failed state: ignore */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1017
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Firmware ack or IOC failure both finish the delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1038
/*
 * Itnim is deleted; the fwdelete request is still waiting for room in
 * the request queue.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC down: cancel queue wait and free the itnim now. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1062
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Fail all outstanding task management commands. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	/* Fail all active IO requests. */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already parked on the cleanup queue fail as well. */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
1096
/*
 * IO cleanup completion: fired by the wait-counter once every IO and TM
 * queued by bfa_itnim_cleanup() has completed.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
1108
/*
 * Initiate cleanup of all IOs. Uses a wait counter; the final
 * completion triggers bfa_itnim_cleanp_comp().
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; may complete immediately if idle. */
	bfa_wc_wait(&itnim->wc);
}
1143
/* Deferred-callback shim: notify the driver of itnim online. */
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}
1152
/* Deferred-callback shim: notify the driver of itnim offline. */
static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}
1161
/* Deferred-callback shim: notify the driver of a SLER event. */
static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}
1170
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
1181
1182
1183
1184
5fbe25c7 1185/*
a36c61f9
KG
1186 * bfa_itnim_public
1187 */
1188
/* An IO belonging to this itnim completed its cleanup. */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1194
/* A TM command belonging to this itnim completed its cleanup. */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1200
/*
 * Account for the kernel-memory needed by the itnim array: one
 * bfa_itnim_s per configured rport.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1210
/*
 * Carve the itnim array out of the pre-allocated kva region and
 * initialize every itnim in the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* Latency minimums start at max so first sample wins. */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* Advance the kva cursor past the consumed array. */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1244
/* IOC failure notification: drive the itnim state machine. */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1251
/*
 * Post an ITNIM create request to firmware.
 * Returns BFA_FALSE (and registers a queue-resume wait) if the request
 * queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	/* Sequence number for matching create/delete exchanges. */
	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1282
/*
 * Post an ITNIM delete request to firmware.
 * Returns BFA_FALSE (and registers a queue-resume wait) if the request
 * queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1308
/*
 * Cleanup all pending failed inflight requests.
 * @iotov: BFA_TRUE to complete with path-TOV status, BFA_FALSE to retry.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
1323
/*
 * Itnim came back online within path TOV: stop the TOV timer and
 * start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1348
/*
 * Path TOV expired (or itnim deleted): fail all pending IO requests.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1371
/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* Timer has fired; it is no longer armed. */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1386
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 * No-op when path_tov is configured as zero.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* Holding off IO is only valid in hold-capable states. */
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1401
/*
 * Stop IO TOV timer (if it is currently armed).
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}
1413
5fbe25c7 1414/*
a36c61f9
KG
1415 * Stop IO TOV timer.
1416 */
1417static void
1418bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1419{
1420 bfa_boolean_t pathtov_active = BFA_FALSE;
1421
1422 if (itnim->iotov_active)
1423 pathtov_active = BFA_TRUE;
1424
1425 bfa_itnim_iotov_stop(itnim);
1426 if (pathtov_active)
1427 bfa_cb_itnim_tov_begin(itnim->ditn);
1428 bfa_itnim_iotov_cleanup(itnim);
1429 if (pathtov_active)
1430 bfa_cb_itnim_tov(itnim->ditn);
1431}
1432
/*
 * Fold this itnim's per-itnim statistics into the module-level
 * deleted-itnim accumulators before the itnim goes away.
 */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1451
1452
1453
5fbe25c7 1454/*
a36c61f9
KG
1455 * bfa_itnim_public
1456 */
1457
/*
 * Itnim interrupt processing: dispatch firmware i2h responses to the
 * itnim identified by the bfa_handle carried in the message.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* Unknown message id from firmware. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1501
1502
1503
/*
 * bfa_itnim_api
 */

/*
 * Create (activate) the itnim pre-allocated for the given rport and
 * bind it to the driver's itnim context @ditn.
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	/* itnims are statically paired with rports by tag. */
	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1524
/* Delete the itnim; actual teardown is driven by the state machine. */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1531
/*
 * Bring the itnim online. @seq_rec selects sequence-level error
 * recovery for the firmware create request.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1539
/* Take the itnim offline; cleanup is driven by the state machine. */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1546
/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	/*
	 * Hold only when path TOV is enabled, the TOV timer is running,
	 * and the itnim is in one of the transitional/offline states.
	 */
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1562
/*
 * Snapshot this itnim's IO profiling data into @ioprofile.
 * Returns BFA_STATUS_IOPROFILE_OFF when profiling is not enabled.
 */
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	/* Struct copy out to the caller. */
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
1580
/*
 * Reset per-itnim statistics and the IO latency profile.
 */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	/* Re-seed latency minimums at max so the next sample wins. */
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
1590
5fbe25c7 1591/*
a36c61f9
KG
1592 * BFA IO module state machine functions
1593 */
1594
/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* Target offline and not held: fail with
				 * path TOV via the completion queue. */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* Hold the IO until the itnim recovers
				 * or path TOV fires. */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			/* Needs external SG pages before it can be sent. */
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1659
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cancel the SG-page wait and fail the IO back. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1706
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed, but IO tag must stay allocated until
		 * firmware frees the resource. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Host-requested (explicit) abort. */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Implicit abort on itnim cleanup. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1790
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Itnim cleanup overrides the retry: issue implicit abort. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1844
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completion racing with the abort: abort response
		 * will still arrive, so just ignore these. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Downgrade the explicit abort to an implicit one and
		 * re-issue it for the cleanup path. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1906
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Completion racing with the implicit abort: wait for
		 * the abort response instead. */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1967
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Never reached firmware: cancel the wait and complete
		 * the abort immediately. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2011
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cleanup supersedes the queued explicit abort; it will
		 * be sent as implicit once the queue resumes. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2062
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is alraedy being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the cleanup abort was sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2112
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback delivered: the IO can be recycled. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2139
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Callback done first: park on the resfree queue until
		 * firmware releases the IO tag. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Firmware freed the resource first: only the host
		 * callback remains. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2172
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2199
2200
a36c61f9
KG
2201static void
2202__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2203{
2204 struct bfa_ioim_s *ioim = cbarg;
2205
2206 if (!complete) {
2207 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2208 return;
2209 }
2210
2211 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2212}
2213
/*
 * Deferred completion callback for a non-trivial IO completion: decode the
 * saved firmware response, pick up sense data and residue, and complete the
 * IO to the OS driver layer.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		/* callback canceled - notify the state machine instead */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* firmware response was stashed in iosp by the ISR path */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
		    m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions:
		 * underrun is reported positive, overrun negative
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			 m->scsi_status, sns_len, snsinfo, residue);
}
2256
2257static void
2258__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2259{
2260 struct bfa_ioim_s *ioim = cbarg;
2261
2262 if (!complete) {
2263 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2264 return;
2265 }
2266
2267 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2268 0, 0, NULL, 0);
2269}
2270
2271static void
2272__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2273{
2274 struct bfa_ioim_s *ioim = cbarg;
2275
2276 bfa_stats(ioim->itnim, path_tov_expired);
2277 if (!complete) {
2278 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2279 return;
2280 }
2281
2282 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2283 0, 0, NULL, 0);
2284}
2285
2286static void
2287__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2288{
2289 struct bfa_ioim_s *ioim = cbarg;
2290
2291 if (!complete) {
2292 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2293 return;
2294 }
2295
2296 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2297}
2298
/*
 * Callback from the SG page allocator once the pages this IO was waiting
 * for become available; takes ownership of them and resumes the IO.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* move the granted pages onto this IO's own SG page queue */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2309
/*
 * Send I/O request to firmware.
 *
 * Builds the BFI IO request message (inline SG element, chained SG pages,
 * FCP_CMND payload, message header) and queues it on the request queue.
 * Returns BFA_FALSE if the request queue is full; in that case the IO is
 * parked on the queue-wait list and resumed via bfa_ioim_qresume().
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = {{{0}}};	/* zeroed template */
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element (first SG entry lives
			 * in the request message itself) */
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			/* remaining entries go into chained SG pages */
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
			    sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the page with a PGDLEN entry
				 * carrying the cumulative page length */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full - link to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline element: either a link to the first SG page or a
	 * zeroed terminator when everything fit inline */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - RW and NONE use the generic IO class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence-level error recovery or unaligned length forces the
	 * generic (slow-path) IO message class */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2459
/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 *
 * Returns BFA_TRUE when no pages are needed or allocation succeeded;
 * BFA_FALSE when the IO must wait for pages (bfa_ioim_sgpg_alloced()
 * fires once they become available).
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* out of pages - queue for a wakeup when some are freed */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
2489
/*
 * Send I/O abort request to firmware.
 *
 * Uses an explicit abort for driver-initiated aborts and a cleanup request
 * when tearing down (abort_explicit cleared). Returns BFA_FALSE if the
 * request queue is full.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = cpu_to_be16(ioim->iotag);
	/* new tag per attempt so stale firmware responses are ignored */
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2524
5fbe25c7 2525/*
a36c61f9
KG
2526 * Call to resume any I/O requests waiting for room in request queue.
2527 */
2528static void
2529bfa_ioim_qresume(void *cbarg)
2530{
2531 struct bfa_ioim_s *ioim = cbarg;
2532
2533 bfa_stats(ioim->itnim, qresumes);
2534 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2535}
2536
2537
/*
 * Notify interested parties (itnim or task-management command) that this
 * IO has finished cleaning up, after parking it on a safe queue.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/* cleanup driven by itnim going offline */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/* hold the completion until path TOV resolves */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* cleanup driven by a TM command - drop its wait count */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2558
2559static bfa_boolean_t
2560bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2561{
2562 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2563 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2564 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2565 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2566 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2567 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2568 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2569 return BFA_FALSE;
2570
2571 return BFA_TRUE;
2572}
2573
/*
 * Complete an IO whose completion was deferred, either because path TOV
 * expired or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device cameback online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
2602
2603
/*
 * Memory allocation and initialization.
 *
 * Claims KVA for the IOIM and IOIM-SP arrays plus DMA memory for per-IO
 * sense buffers out of the pre-sized meminfo pools, then initializes each
 * IOIM and places it on the free queue. IO tag == array index.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell firmware where the sense buffer region lives */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2667
a36c61f9
KG
2668void
2669bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2670{
2671 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2672 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2673 struct bfa_ioim_s *ioim;
2674 u16 iotag;
2675 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2676
ba816ea8 2677 iotag = be16_to_cpu(rsp->io_tag);
a36c61f9
KG
2678
2679 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2680 bfa_assert(ioim->iotag == iotag);
2681
2682 bfa_trc(ioim->bfa, ioim->iotag);
2683 bfa_trc(ioim->bfa, rsp->io_status);
2684 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2685
2686 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
6a18b167 2687 ioim->iosp->comp_rspmsg = *m;
a36c61f9
KG
2688
2689 switch (rsp->io_status) {
2690 case BFI_IOIM_STS_OK:
2691 bfa_stats(ioim->itnim, iocomp_ok);
2692 if (rsp->reuse_io_tag == 0)
2693 evt = BFA_IOIM_SM_DONE;
2694 else
2695 evt = BFA_IOIM_SM_COMP;
2696 break;
2697
2698 case BFI_IOIM_STS_TIMEDOUT:
2699 bfa_stats(ioim->itnim, iocomp_timedout);
2700 case BFI_IOIM_STS_ABORTED:
2701 rsp->io_status = BFI_IOIM_STS_ABORTED;
2702 bfa_stats(ioim->itnim, iocomp_aborted);
2703 if (rsp->reuse_io_tag == 0)
2704 evt = BFA_IOIM_SM_DONE;
2705 else
2706 evt = BFA_IOIM_SM_COMP;
2707 break;
2708
2709 case BFI_IOIM_STS_PROTO_ERR:
2710 bfa_stats(ioim->itnim, iocom_proto_err);
2711 bfa_assert(rsp->reuse_io_tag);
2712 evt = BFA_IOIM_SM_COMP;
2713 break;
2714
2715 case BFI_IOIM_STS_SQER_NEEDED:
2716 bfa_stats(ioim->itnim, iocom_sqer_needed);
2717 bfa_assert(rsp->reuse_io_tag == 0);
2718 evt = BFA_IOIM_SM_SQRETRY;
2719 break;
2720
2721 case BFI_IOIM_STS_RES_FREE:
2722 bfa_stats(ioim->itnim, iocom_res_free);
2723 evt = BFA_IOIM_SM_FREE;
2724 break;
2725
2726 case BFI_IOIM_STS_HOST_ABORTED:
2727 bfa_stats(ioim->itnim, iocom_hostabrts);
2728 if (rsp->abort_tag != ioim->abort_tag) {
2729 bfa_trc(ioim->bfa, rsp->abort_tag);
2730 bfa_trc(ioim->bfa, ioim->abort_tag);
2731 return;
2732 }
2733
2734 if (rsp->reuse_io_tag)
2735 evt = BFA_IOIM_SM_ABORT_COMP;
2736 else
2737 evt = BFA_IOIM_SM_ABORT_DONE;
2738 break;
2739
2740 case BFI_IOIM_STS_UTAG:
2741 bfa_stats(ioim->itnim, iocom_utags);
2742 evt = BFA_IOIM_SM_COMP_UTAG;
2743 break;
2744
2745 default:
2746 bfa_assert(0);
2747 }
2748
2749 bfa_sm_send_event(ioim, evt);
2750}
2751
/*
 * ISR for the good-completion fast path: no status decoding needed,
 * just update the latency profile and fire COMP_GOOD at the IOIM.
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
2770
/*
 * Record the IO start timestamp for latency profiling.
 */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
2776
/*
 * On IO completion, fold the elapsed time (in jiffies) into the per-itnim
 * latency histogram bucket selected by the IO transfer length.
 */
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 fcp_dl = scsi_bufflen(cmnd);
	u32 index = bfa_ioim_get_index(fcp_dl);	/* size-class bucket */
	u64 end_time = jiffies;
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val = (u32)(end_time - ioim->start_time);

	bfa_itnim_ioprofile_update(ioim->itnim, index);

	io_lat->count[index]++;
	io_lat->min[index] = (io_lat->min[index] < val) ?
		io_lat->min[index] : val;
	io_lat->max[index] = (io_lat->max[index] > val) ?
		io_lat->max[index] : val;
	io_lat->avg[index] += val;	/* running sum; mean = avg / count */
}
/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* no TM command involved - notify_cleanup signals the itnim */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2809
/*
 * Clean up an IO on behalf of a task-management command; the TM's wait
 * counter is decremented when this IO finishes cleaning up.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2819
/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2830
/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2840
2841
/*
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * Returns NULL when all IO tags are in use (caller must retry later).
 * The returned IOIM is linked onto the itnim's active IO queue.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * alocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2874
/*
 * Return an IOIM to the free pool, releasing any SG pages it held.
 * Must only be called from the uninit state.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
		      (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2895
/*
 * Kick off a newly allocated IO: pick its request queue and start the
 * state machine.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 * NOTE(review): with IO redirection enabled, reqq gets BFA_FALSE
	 * (i.e. 0) rather than a queue id from bfa_itnim_get_reqq() -
	 * presumably "queue 0"; confirm against the redirect logic.
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2911
/*
 * Driver I/O abort request.
 *
 * Returns BFA_STATUS_FAILED if the IO is in a state where an abort
 * cannot be issued (see bfa_ioim_is_abortable()).
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
2929
2930
/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* collect the IOs this TM command covers */
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			/* request queue full - wait for space */
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2971
/*
 * brief
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* firmware completed the TM - now clean up gathered IOs */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* itnim going offline - abort the in-flight TM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3008
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3040
/*
 * TM command completed on wire; waiting for all IOs in its scope to
 * finish cleaning up before notifying the host.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* all covered IOs cleaned up - complete the TM to host */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM conmpletion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3069
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* queue space available - send the TM now */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3104
/*
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
			   enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 *
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3139
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* host callback delivered - recycle the TM resource */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* nothing more to do - callback already queued */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3165
3166
a36c61f9
KG
3167static void
3168__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3169{
3170 struct bfa_tskim_s *tskim = cbarg;
3171
3172 if (!complete) {
3173 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3174 return;
3175 }
3176
3177 bfa_stats(tskim->itnim, tm_success);
3178 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3179}
3180
3181static void
3182__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3183{
3184 struct bfa_tskim_s *tskim = cbarg;
3185
3186 if (!complete) {
3187 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3188 return;
3189 }
3190
3191 bfa_stats(tskim->itnim, tm_failures);
3192 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3193 BFI_TSKIM_STS_FAILED);
3194}
3195
/*
 * Decide whether an IO addressed to 'lun' falls inside this TM command's
 * scope: target reset covers every LUN, the LUN-scoped TM types cover only
 * an exact LUN match.
 */
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return (!memcmp(&tskim->lun, &lun, sizeof(lun)));

	default:
		bfa_assert(0);
	}

	return BFA_FALSE;
}
3215
/*
 * Gather affected IO requests and task management commands.
 *
 * Active IOs in TM scope move onto the TM's private io_q for cleanup;
 * not-yet-started (pending) IOs are failed back immediately with TOV.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
3257
5fbe25c7 3258/*
a36c61f9
KG
3259 * IO cleanup completion
3260 */
3261static void
3262bfa_tskim_cleanp_comp(void *tskim_cbarg)
3263{
3264 struct bfa_tskim_s *tskim = tskim_cbarg;
3265
3266 bfa_stats(tskim->itnim, tm_io_comps);
3267 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3268}
3269
/*
 * Clean up every IO gathered into this TM's scope, using a wait counter
 * so bfa_tskim_cleanp_comp() fires only after the last one finishes.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	/* drops the initial reference; fires cleanp_comp if io_q was empty */
	bfa_wc_wait(&tskim->wc);
}
3289
/*
 * Send task management request to firmware.
 *
 * Returns BFA_FALSE when the request queue has no room (caller waits and
 * retries via bfa_tskim_qresume()).
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3324
/*
 * Send abort request to cleanup an active TM to firmware.
 *
 * Returns BFA_FALSE when the request queue has no room.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
		    bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3355
5fbe25c7 3356/*
a36c61f9
KG
3357 * Call to resume task management cmnd waiting for room in request queue.
3358 */
3359static void
3360bfa_tskim_qresume(void *cbarg)
3361{
3362 struct bfa_tskim_s *tskim = cbarg;
3363
3364 bfa_stats(tskim->itnim, tm_qresumes);
3365 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3366}
3367
5fbe25c7 3368/*
a36c61f9
KG
3369 * Cleanup IOs associated with a task mangement command on IOC failures.
3370 */
3371static void
3372bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3373{
3374 struct bfa_ioim_s *ioim;
3375 struct list_head *qe, *qen;
3376
3377 list_for_each_safe(qe, qen, &tskim->io_q) {
3378 ioim = (struct bfa_ioim_s *) qe;
3379 bfa_ioim_iocdisable(ioim);
3380 }
3381}
3382
3383
/*
 * Notification on completions from related ioim.
 *
 * Drops one reference on the TM's wait counter; when the last cleaned-up
 * IO completes, the counter's completion callback runs.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}
3392
5fbe25c7 3393/*
a36c61f9
KG
3394 * Handle IOC h/w failure notification from itnim.
3395 */
3396void
3397bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3398{
3399 tskim->notify = BFA_FALSE;
3400 bfa_stats(tskim->itnim, tm_iocdowns);
3401 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3402}
3403
5fbe25c7 3404/*
a36c61f9
KG
3405 * Cleanup TM command and associated IOs as part of ITNIM offline.
3406 */
3407void
3408bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3409{
3410 tskim->notify = BFA_TRUE;
3411 bfa_stats(tskim->itnim, tm_cleanups);
3412 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3413}
3414
5fbe25c7 3415/*
a36c61f9
KG
3416 * Memory allocation and initialization.
3417 */
3418void
3419bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3420{
3421 struct bfa_tskim_s *tskim;
3422 u16 i;
3423
3424 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3425
3426 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3427 fcpim->tskim_arr = tskim;
3428
3429 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3430 /*
3431 * initialize TSKIM
3432 */
6a18b167 3433 memset(tskim, 0, sizeof(struct bfa_tskim_s));
a36c61f9
KG
3434 tskim->tsk_tag = i;
3435 tskim->bfa = fcpim->bfa;
3436 tskim->fcpim = fcpim;
3437 tskim->notify = BFA_FALSE;
3438 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3439 tskim);
3440 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3441
3442 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3443 }
3444
3445 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3446}
3447
/*
 * ISR for TM response messages from firmware.
 *
 * Looks up the TSKIM by the tag echoed in the response, records the
 * firmware status, and posts the matching state machine event.
 */
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	/* Tag from firmware must match the structure it maps to. */
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
3473
3474
a36c61f9
KG
3475struct bfa_tskim_s *
3476bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3477{
3478 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3479 struct bfa_tskim_s *tskim;
3480
3481 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3482
3483 if (tskim)
3484 tskim->dtsk = dtsk;
3485
3486 return tskim;
3487}
3488
/*
 * Return a TSKIM to the module free list.
 *
 * The TSKIM must currently be linked on its itnim's tsk_q (asserted);
 * it is unlinked from there and appended to the free list.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3496
5fbe25c7 3497/*
a36c61f9
KG
3498 * Start a task management command.
3499 *
3500 * @param[in] tskim BFA task management command instance
3501 * @param[in] itnim i-t nexus for the task management command
3502 * @param[in] lun lun, if applicable
3503 * @param[in] tm_cmnd Task management command code.
3504 * @param[in] t_secs Timeout in seconds
3505 *
3506 * @return None.
3507 */
3508void
f314878a
MZ
3509bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3510 struct scsi_lun lun,
a36c61f9
KG
3511 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3512{
3513 tskim->itnim = itnim;
3514 tskim->lun = lun;
3515 tskim->tm_cmnd = tm_cmnd;
3516 tskim->tsecs = tsecs;
3517 tskim->notify = BFA_FALSE;
3518 bfa_stats(itnim, tm_cmnds);
3519
3520 list_add_tail(&tskim->qe, &itnim->tsk_q);
3521 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3522}