/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
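/*
 * Note on the lookup above: the mask (_tag) & (num_itnims - 1) is only
 * equivalent to (_tag) % num_itnims when num_itnims is a power of two;
 * e.g. tag 0x105 with 256 itnims maps to array slot 5.
 */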

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);                        \
        list_del(&(__itnim)->qe);                                       \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);                   \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));                      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);                  \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));                     \
        }                                                               \
} while (0)

#define bfa_ioim_rp_wwn(__ioim)                                         \
        (((struct bfa_fcs_rport_s *)                                    \
         (__ioim)->itnim->rport->rport_drv)->pwwn)

#define bfa_ioim_lp_wwn(__ioim)                                         \
        ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),                  \
        (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);                     \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));                        \
        }                                                               \
} while (0)

enum bfa_ioim_lm_status {
        BFA_IOIM_LM_PRESENT = 1,
        BFA_IOIM_LM_LUN_NOT_SUP = 2,
        BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
        BFA_IOIM_LM_LUN_NOT_RDY = 4,
};

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine events
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /* itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /* itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /* itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /* firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /* deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /* IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /* second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /* IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /* queue space available */
};
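/*
 * A rough sketch of the normal itnim lifecycle driven by these events
 * (details in the state handlers below):
 *
 *   uninit --CREATE--> created --ONLINE--> fwcreate --FWRSP--> online
 *   online --OFFLINE--> cleanup_offline --CLEANUP--> fwdelete
 *   fwdelete --FWRSP--> offline --DELETE--> uninit
 *
 * The *_qfull states are taken instead whenever the request queue to
 * firmware is full, and QRESUME retries the send.
 */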

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START = 1,          /* io start request from host */
        BFA_IOIM_SM_COMP_GOOD = 2,      /* io good comp, resource free */
        BFA_IOIM_SM_COMP = 3,           /* io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG = 4,      /* io comp, resource is free */
        BFA_IOIM_SM_DONE = 5,           /* io comp, resource not free */
        BFA_IOIM_SM_FREE = 6,           /* io resource is freed */
        BFA_IOIM_SM_ABORT = 7,          /* abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP = 8,     /* abort from f/w */
        BFA_IOIM_SM_ABORT_DONE = 9,     /* abort completion from f/w */
        BFA_IOIM_SM_QRESUME = 10,       /* CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED = 11,     /* SG page allocation successful */
        BFA_IOIM_SM_SQRETRY = 12,       /* sequence recovery retry */
        BFA_IOIM_SM_HCB = 13,           /* bfa callback complete */
        BFA_IOIM_SM_CLEANUP = 14,       /* IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART = 15,       /* IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE = 16,        /* IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL = 17,        /* IOC h/w failure event */
        BFA_IOIM_SM_IOTOV = 18,         /* ITN offline TOV */
        BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
        BFA_IOIM_SM_LM_RPL_DC = 20,     /* lunmask report-lun data changed */
        BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
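/*
 * Sketch of the typical IO fast path, assuming the itnim is online and
 * the request fits the inline SG list and the request queue:
 *
 *   uninit --START--> active --COMP_GOOD--> hcb
 *
 * after which the host callback completion (BFA_IOIM_SM_HCB) returns
 * the IO to uninit. The sgalloc, qfull and abort/cleanup states branch
 * off this path, as the handlers below show.
 */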


/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);                                 \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);                    \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START = 1,         /* TM command start */
        BFA_TSKIM_SM_DONE = 2,          /* TM completion */
        BFA_TSKIM_SM_QRESUME = 3,       /* resume after qfull */
        BFA_TSKIM_SM_HWFAIL = 5,        /* IOC h/w failure event */
        BFA_TSKIM_SM_HCB = 6,           /* BFA callback completion */
        BFA_TSKIM_SM_IOS_DONE = 7,      /* IO and sub TM completions */
        BFA_TSKIM_SM_CLEANUP = 8,       /* TM cleanup on ITN offline */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /* TM abort completion */
};
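/*
 * Rough TM lifecycle, inferred from the handler names declared below:
 * uninit --START--> active, then either DONE (firmware response) or
 * CLEANUP on ITN offline, followed by IOS_DONE once all affected IOs
 * complete, and finally HCB. Event value 4 is left unassigned above;
 * the remaining values are kept stable rather than renumbered.
 */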

/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
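/*
 * Illustrative numbers only: with num_ioim_reqs = 2000, each IO costs
 * sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s) of kernel
 * virtual memory, and at least BFA_TSKIM_MIN task-management slots are
 * reserved regardless of what the caller configured.
 */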


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp = fcp;
        fcpim->bfa = bfa;
        fcpim->num_itnims = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov = cfg->drvcfg.path_tov;
        fcpim->delay_comp = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}
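/*
 * Unit note: callers pass the path timeout in seconds; fcpim->path_tov
 * appears to be kept internally in milliseconds (hence the *1000 on set
 * and /1000 on get), clamped to BFA_FCPIM_PATHTOV_MAX.
 */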

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
        bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
        bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
        bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}
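/*
 * Note: avg[idx] accumulates a running sum of latencies in jiffies;
 * presumably the consumer divides by count[idx] (and scales by
 * clock_res_mul/clock_res_div, see bfa_itnim_get_ioprofile below) to
 * report a mean latency per transfer-size bucket.
 */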

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats on all itnims before profiling starts */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO request in pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pickup this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
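/*
 * The bfa_wc_* calls above implement a simple wait-counter join:
 * bfa_wc_init() registers bfa_itnim_cleanp_comp() as the completion,
 * each queued IO/TM cleanup takes a reference via bfa_wc_up(), and
 * bfa_itnim_iodone()/bfa_itnim_tskdone() below drop it via
 * bfa_wc_down(); bfa_wc_wait() releases the initial reference so the
 * completion runs once every outstanding cleanup has finished.
 */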

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
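/*
 * Request-queue pattern used by both send_fwcreate above and
 * send_fwdelete below: bfa_reqq_next() returns NULL when the request
 * queue is full, in which case the itnim parks on its wait element and
 * bfa_itnim_qresume() (via BFA_ITNIM_SM_QRESUME) retries the send once
 * space frees up; the *_qfull states in the state machine cover
 * exactly this window.
 */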

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back any pending IO requests (delete path).
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
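/*
 * While bfa_itnim_hold_io() returns true, newly started IOs are parked
 * on itnim->pending_q (see bfa_ioim_sm_uninit below) instead of being
 * failed back immediately; they are either restarted when the itnim
 * comes back online or completed with a path TOV error when the timer
 * armed in bfa_itnim_iotov_start() fires.
 */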

#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        itnim->ioprofile.io_profile_start_time =
                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}
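/*
 * Latency samples are recorded in jiffies (see bfa_ioim_profile_comp),
 * so a consumer can convert a sample t to milliseconds as
 * t * clock_res_mul / clock_res_div, i.e. t * 1000 / HZ with the
 * constants above.
 */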

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;
        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_abort, ioim);
                break;

        case BFA_IOIM_SM_LM_LUN_NOT_SUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_lm_lun_not_sup, ioim);
                break;

        case BFA_IOIM_SM_LM_RPL_DC:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_lm_rpl_dc, ioim);
                break;

        case BFA_IOIM_SM_LM_LUN_NOT_RDY:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_lm_lun_not_rdy, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_good_comp, ioim);
                break;

        case BFA_IOIM_SM_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                ioim->iosp->abort_explicit = BFA_TRUE;
                ioim->io_cbfn = __bfa_cb_ioim_abort;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_CLEANUP:
                ioim->iosp->abort_explicit = BFA_FALSE;
                ioim->io_cbfn = __bfa_cb_ioim_failed;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        case BFA_IOIM_SM_SQRETRY:
                if (bfa_ioim_maxretry_reached(ioim)) {
                        /* max retry reached, free IO */
                        bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                        bfa_ioim_move_to_comp_q(ioim);
                        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                        __bfa_cb_ioim_failed, ioim);
                        break;
                }
                /* waiting for IO tag resource free */
                bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_FREE:
                /* abts and rrq done. Now retry the IO with new tag */
                bfa_ioim_update_iotag(ioim);
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                ioim->iosp->abort_explicit = BFA_FALSE;
                ioim->io_cbfn = __bfa_cb_ioim_failed;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_failed, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /* in this state IO abort is done.
                 * Waiting for IO tag resource free.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
1788
5fbe25c7 1789/*
da99dcc9 1790 * IO is being aborted, waiting for completion from firmware.
a36c61f9
KG
1791 */
1792static void
1793bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1794{
1795 bfa_trc(ioim->bfa, ioim->iotag);
1796 bfa_trc(ioim->bfa, event);
1797
1798 switch (event) {
1799 case BFA_IOIM_SM_COMP_GOOD:
1800 case BFA_IOIM_SM_COMP:
1801 case BFA_IOIM_SM_DONE:
1802 case BFA_IOIM_SM_FREE:
1803 break;
1804
1805 case BFA_IOIM_SM_ABORT_DONE:
1806 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1807 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1808 ioim);
1809 break;
1810
1811 case BFA_IOIM_SM_ABORT_COMP:
1812 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1813 bfa_ioim_move_to_comp_q(ioim);
1814 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1815 ioim);
1816 break;
1817
1818 case BFA_IOIM_SM_COMP_UTAG:
1819 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1820 bfa_ioim_move_to_comp_q(ioim);
1821 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1822 ioim);
1823 break;
1824
1825 case BFA_IOIM_SM_CLEANUP:
d4b671c5 1826 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
a36c61f9
KG
1827 ioim->iosp->abort_explicit = BFA_FALSE;
1828
1829 if (bfa_ioim_send_abort(ioim))
1830 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1831 else {
1832 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1833 bfa_stats(ioim->itnim, qwait);
1834 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1835 &ioim->iosp->reqq_wait);
1836 }
1837 break;
1838
1839 case BFA_IOIM_SM_HWFAIL:
1840 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1841 bfa_ioim_move_to_comp_q(ioim);
1842 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1843 ioim);
1844 break;
1845
1846 default:
1847 bfa_sm_fault(ioim->bfa, event);
1848 }
1849}
1850
5fbe25c7 1851/*
1852 * IO is being cleaned up (implicit abort), waiting for completion from
1853 * firmware.
1854 */
1855static void
1856bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1857{
1858 bfa_trc(ioim->bfa, ioim->iotag);
1859 bfa_trc(ioim->bfa, event);
1860
1861 switch (event) {
1862 case BFA_IOIM_SM_COMP_GOOD:
1863 case BFA_IOIM_SM_COMP:
1864 case BFA_IOIM_SM_DONE:
1865 case BFA_IOIM_SM_FREE:
1866 break;
1867
1868 case BFA_IOIM_SM_ABORT:
5fbe25c7 1869 /*
1870 * IO is already being aborted implicitly
1871 */
1872 ioim->io_cbfn = __bfa_cb_ioim_abort;
1873 break;
1874
1875 case BFA_IOIM_SM_ABORT_DONE:
1876 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1877 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1878 bfa_ioim_notify_cleanup(ioim);
1879 break;
1880
1881 case BFA_IOIM_SM_ABORT_COMP:
1882 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1883 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1884 bfa_ioim_notify_cleanup(ioim);
1885 break;
1886
1887 case BFA_IOIM_SM_COMP_UTAG:
1888 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1889 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1890 bfa_ioim_notify_cleanup(ioim);
1891 break;
1892
1893 case BFA_IOIM_SM_HWFAIL:
1894 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1895 bfa_ioim_move_to_comp_q(ioim);
1896 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1897 ioim);
1898 break;
1899
1900 case BFA_IOIM_SM_CLEANUP:
5fbe25c7 1901 /*
1902 * IO can be in cleanup state already due to TM command.
1903 * 2nd cleanup request comes from ITN offline event.
1904 */
1905 break;
1906
1907 default:
1908 bfa_sm_fault(ioim->bfa, event);
1909 }
1910}
1911
5fbe25c7 1912/*
da99dcc9 1913 * IO is waiting for room in request CQ
1914 */
1915static void
1916bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1917{
1918 bfa_trc(ioim->bfa, ioim->iotag);
1919 bfa_trc(ioim->bfa, event);
1920
1921 switch (event) {
1922 case BFA_IOIM_SM_QRESUME:
1923 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1924 bfa_ioim_send_ioreq(ioim);
1925 break;
1926
1927 case BFA_IOIM_SM_ABORT:
1928 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1929 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1930 bfa_ioim_move_to_comp_q(ioim);
1931 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1932 ioim);
1933 break;
1934
1935 case BFA_IOIM_SM_CLEANUP:
1936 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1937 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1938 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1939 ioim);
1940 bfa_ioim_notify_cleanup(ioim);
1941 break;
1942
1943 case BFA_IOIM_SM_HWFAIL:
1944 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1945 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1946 bfa_ioim_move_to_comp_q(ioim);
1947 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1948 ioim);
1949 break;
1950
1951 default:
1952 bfa_sm_fault(ioim->bfa, event);
1953 }
1954}
1955
5fbe25c7 1956/*
da99dcc9 1957 * Active IO is being aborted, waiting for room in request CQ.
1958 */
1959static void
1960bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1961{
1962 bfa_trc(ioim->bfa, ioim->iotag);
1963 bfa_trc(ioim->bfa, event);
1964
1965 switch (event) {
1966 case BFA_IOIM_SM_QRESUME:
1967 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1968 bfa_ioim_send_abort(ioim);
1969 break;
1970
1971 case BFA_IOIM_SM_CLEANUP:
d4b671c5 1972 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1973 ioim->iosp->abort_explicit = BFA_FALSE;
1974 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1975 break;
1976
1977 case BFA_IOIM_SM_COMP_GOOD:
1978 case BFA_IOIM_SM_COMP:
1979 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1980 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1981 bfa_ioim_move_to_comp_q(ioim);
1982 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1983 ioim);
1984 break;
1985
1986 case BFA_IOIM_SM_DONE:
1987 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1988 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1989 bfa_ioim_move_to_comp_q(ioim);
1990 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1991 ioim);
1992 break;
1993
1994 case BFA_IOIM_SM_HWFAIL:
1995 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1996 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1997 bfa_ioim_move_to_comp_q(ioim);
1998 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1999 ioim);
2000 break;
2001
2002 default:
2003 bfa_sm_fault(ioim->bfa, event);
2004 }
2005}
2006
5fbe25c7 2007/*
da99dcc9 2008 * Active IO is being cleaned up, waiting for room in request CQ.
2009 */
2010static void
2011bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2012{
2013 bfa_trc(ioim->bfa, ioim->iotag);
2014 bfa_trc(ioim->bfa, event);
2015
2016 switch (event) {
2017 case BFA_IOIM_SM_QRESUME:
2018 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
2019 bfa_ioim_send_abort(ioim);
2020 break;
2021
2022 case BFA_IOIM_SM_ABORT:
5fbe25c7 2023 /*
b595076a 2024 * IO is already being cleaned up implicitly
2025 */
2026 ioim->io_cbfn = __bfa_cb_ioim_abort;
2027 break;
2028
2029 case BFA_IOIM_SM_COMP_GOOD:
2030 case BFA_IOIM_SM_COMP:
2031 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2032 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2033 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2034 bfa_ioim_notify_cleanup(ioim);
2035 break;
2036
2037 case BFA_IOIM_SM_DONE:
2038 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
2039 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2040 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2041 bfa_ioim_notify_cleanup(ioim);
2042 break;
2043
2044 case BFA_IOIM_SM_HWFAIL:
2045 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2046 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2047 bfa_ioim_move_to_comp_q(ioim);
2048 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2049 ioim);
2050 break;
2051
2052 default:
2053 bfa_sm_fault(ioim->bfa, event);
2054 }
2055}
2056
5fbe25c7 2057/*
2058 * IO bfa callback is pending.
2059 */
2060static void
2061bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2062{
2063 switch (event) {
2064 case BFA_IOIM_SM_HCB:
2065 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2066 bfa_ioim_free(ioim);
2067 break;
2068
2069 case BFA_IOIM_SM_CLEANUP:
2070 bfa_ioim_notify_cleanup(ioim);
2071 break;
2072
2073 case BFA_IOIM_SM_HWFAIL:
2074 break;
2075
2076 default:
2077 bfa_sm_fault(ioim->bfa, event);
2078 }
2079}
2080
5fbe25c7 2081/*
2082 * IO bfa callback is pending. IO resource cannot be freed.
2083 */
2084static void
2085bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2086{
2087 bfa_trc(ioim->bfa, ioim->iotag);
2088 bfa_trc(ioim->bfa, event);
2089
2090 switch (event) {
2091 case BFA_IOIM_SM_HCB:
2092 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2093 list_del(&ioim->qe);
2094 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2095 break;
2096
2097 case BFA_IOIM_SM_FREE:
2098 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2099 break;
2100
2101 case BFA_IOIM_SM_CLEANUP:
2102 bfa_ioim_notify_cleanup(ioim);
2103 break;
2104
2105 case BFA_IOIM_SM_HWFAIL:
2106 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2107 break;
2108
2109 default:
2110 bfa_sm_fault(ioim->bfa, event);
2111 }
2112}
2113
5fbe25c7 2114/*
2115 * IO is completed, waiting for resource free from firmware.
2116 */
2117static void
2118bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2119{
2120 bfa_trc(ioim->bfa, ioim->iotag);
2121 bfa_trc(ioim->bfa, event);
2122
2123 switch (event) {
2124 case BFA_IOIM_SM_FREE:
2125 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2126 bfa_ioim_free(ioim);
2127 break;
2128
2129 case BFA_IOIM_SM_CLEANUP:
2130 bfa_ioim_notify_cleanup(ioim);
2131 break;
2132
2133 case BFA_IOIM_SM_HWFAIL:
2134 break;
2135
2136 default:
2137 bfa_sm_fault(ioim->bfa, event);
2138 }
2139}
2140
2141/*
2142 * This is called from bfa_fcpim_start after bfa_init(), once the driver
2143 * has completed the flash read. Invalidate the stale lun mask content:
2144 * unit attention, rp tag and lp tag.
2145 */
2146static void
2147bfa_ioim_lm_init(struct bfa_s *bfa)
2148{
2149 struct bfa_lun_mask_s *lunm_list;
2150 int i;
2151
2152 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2153 return;
2154
2155 lunm_list = bfa_get_lun_mask_list(bfa);
2156 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2157 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2158 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2159 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2160 }
2161}
2162
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) &&
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
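/*
 * Editor's sketch: stripped of the INQUIRY/REPORT LUNS special cases,
 * the LUN-mask check above reduces to a linear scan keyed on
 * (lun, rport tag, lport tag). A simplified standalone model of that
 * lookup; the type names and LM_CFG_MAX size here are illustrative
 * assumptions, not the driver's definitions:
 */
#include <stdbool.h>
#include <stdint.h>

#define LM_CFG_MAX	16	/* stand-in for MAX_LUN_MASK_CFG */

struct lm_entry {
	bool	 active;	/* BFA_IOIM_LUN_MASK_ACTIVE analogue */
	uint64_t lun;		/* flattened scsilun_to_int() value */
	uint16_t rp_tag;
	uint8_t	 lp_tag;
};

static bool lm_lun_is_masked_in(const struct lm_entry *tbl, uint64_t lun,
				uint16_t rp_tag, uint8_t lp_tag)
{
	int i;

	for (i = 0; i < LM_CFG_MAX; i++) {
		if (!tbl[i].active)
			continue;
		if (tbl[i].lun == lun && tbl[i].rp_tag == rp_tag &&
		    tbl[i].lp_tag == lp_tag)
			return true;	/* LUN present in the mask */
	}
	return false;	/* caller fails the IO with LUN NOT SUPPORTED */
}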
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun = 0, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* a LUN entry may straddle the SG buffer boundary */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
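/*
 * Editor's sketch: the shift arithmetic above reassembles one 8-byte
 * struct scsi_lun whose bytes straddle two scatter-gather buffers. A
 * standalone, byte-order-agnostic equivalent of that reconstruction
 * (a minimal sketch, not the driver's code):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void stitch_lun(uint8_t out[8], const uint8_t *prev_tail,
		       int bytes_in_prev, const uint8_t *next_head)
{
	/* leading bytes live at the end of the previous SG buffer */
	memcpy(out, prev_tail, bytes_in_prev);
	/* the remainder starts the next SG buffer */
	memcpy(out + bytes_in_prev, next_head, 8 - bytes_in_prev);
}

int main(void)
{
	uint8_t prev[] = { 0xAA, 0xBB, 0xCC };	/* 3 bytes left over */
	uint8_t next[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t lun[8];

	stitch_lun(lun, prev, sizeof(prev), next);
	assert(lun[0] == 0xAA && lun[3] == 0x11 && lun[7] == 0x55);
	return 0;
}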
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return BFA_FALSE;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = be32_to_cpu(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
2385 sizeof(struct scsi_lun));
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = cpu_to_be32(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
a36c61f9 2399
2400static void
2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2402{
2403 struct bfa_ioim_s *ioim = cbarg;
2404
2405 if (!complete) {
2406 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2407 return;
2408 }
2409
2410 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2411}
2412
2413static void
2414__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2415{
2416 struct bfa_ioim_s *ioim = cbarg;
2417 struct bfi_ioim_rsp_s *m;
2418 u8 *snsinfo = NULL;
2419 u8 sns_len = 0;
2420 s32 residue = 0;
2421
2422 if (!complete) {
2423 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2424 return;
2425 }
2426
2427 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2428 if (m->io_status == BFI_IOIM_STS_OK) {
5fbe25c7 2429 /*
2430 * setup sense information, if present
2431 */
2432 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2433 m->sns_len) {
2434 sns_len = m->sns_len;
2435 snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2436 ioim->iotag);
2437 }
2438
5fbe25c7 2439 /*
2440 * setup residue value correctly for normal completions
2441 */
2442 if (m->resid_flags == FCP_RESID_UNDER) {
ba816ea8 2443 residue = be32_to_cpu(m->residue);
2444 bfa_stats(ioim->itnim, iocomp_underrun);
2445 }
2446 if (m->resid_flags == FCP_RESID_OVER) {
ba816ea8 2447 residue = be32_to_cpu(m->residue);
2448 residue = -residue;
2449 bfa_stats(ioim->itnim, iocomp_overrun);
2450 }
2451 }
2452
2453 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2454 m->scsi_status, sns_len, snsinfo, residue);
2455}
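/*
 * Editor's note: FCP carries the residual as an unsigned big-endian
 * count plus resid flags; the callback above folds both into one signed
 * value where a negative residue means overrun. A minimal userspace
 * model of that convention, assuming glibc's <endian.h>; the flag
 * values are illustrative, not the real FCP_RESID_* constants:
 */
#include <endian.h>
#include <stdint.h>

enum { RESID_NONE, RESID_UNDER, RESID_OVER };

static int32_t fcp_signed_residue(uint32_t residue_be, int resid_flags)
{
	int32_t residue = 0;

	if (resid_flags == RESID_UNDER)		/* short transfer */
		residue = (int32_t)be32toh(residue_be);
	if (resid_flags == RESID_OVER)		/* overrun, negated */
		residue = -(int32_t)be32toh(residue_be);
	return residue;
}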
2456
2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag)
2537{
2538 struct bfa_lun_mask_s *lun_list;
2539 u8 i;
2540
2541 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2542 return;
2543
2544 lun_list = bfa_get_lun_mask_list(bfa);
2545 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2546 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2547 if ((lun_list[i].lp_wwn == lp_wwn) &&
2548 (lun_list[i].rp_wwn == rp_wwn)) {
2549 lun_list[i].rp_tag = rp_tag;
2550 lun_list[i].lp_tag = lp_tag;
2551 }
2552 }
2553 }
2554}
2555
2556/*
2557 * set UA for all active luns in LM DB
2558 */
2559static void
2560bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2561{
2562 struct bfa_lun_mask_s *lunm_list;
2563 int i;
2564
2565 lunm_list = bfa_get_lun_mask_list(bfa);
2566 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2567 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2568 continue;
2569 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2570 }
2571}
2572
2573bfa_status_t
2574bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2575{
2576 struct bfa_lunmask_cfg_s *lun_mask;
2577
2578 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2579 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2580 return BFA_STATUS_FAILED;
2581
2582 if (bfa_get_lun_mask_status(bfa) == update)
2583 return BFA_STATUS_NO_CHANGE;
2584
2585 lun_mask = bfa_get_lun_mask(bfa);
2586 lun_mask->status = update;
2587
2588 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2589 bfa_ioim_lm_set_ua(bfa);
2590
2591 return bfa_dconf_update(bfa);
2592}
2593
2594bfa_status_t
2595bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2596{
2597 int i;
2598 struct bfa_lun_mask_s *lunm_list;
2599
2600 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2601 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2602 return BFA_STATUS_FAILED;
2603
2604 lunm_list = bfa_get_lun_mask_list(bfa);
2605 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2606 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2607 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2608 bfa_rport_unset_lunmask(bfa,
2609 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2610 }
2611 }
2612
2613 memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2614 return bfa_dconf_update(bfa);
2615}
2616
2617bfa_status_t
2618bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2619{
2620 struct bfa_lunmask_cfg_s *lun_mask;
2621
2622 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2623 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2624 return BFA_STATUS_FAILED;
2625
2626 lun_mask = bfa_get_lun_mask(bfa);
2627 memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2628 return BFA_STATUS_OK;
2629}
2630
2631bfa_status_t
2632bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2633 wwn_t rpwwn, struct scsi_lun lun)
2634{
2635 struct bfa_lun_mask_s *lunm_list;
2636 struct bfa_rport_s *rp = NULL;
2637 int i, free_index = MAX_LUN_MASK_CFG + 1;
2638 struct bfa_fcs_lport_s *port = NULL;
2639 struct bfa_fcs_rport_s *rp_fcs;
2640
2641 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2642 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2643 return BFA_STATUS_FAILED;
2644
2645 port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2646 vf_id, *pwwn);
2647 if (port) {
2648 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs ? rp_fcs->bfa_rport : NULL;
2651 }
2652
2653 lunm_list = bfa_get_lun_mask_list(bfa);
2654 /* if entry exists */
2655 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2656 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2657 free_index = i;
2658 if ((lunm_list[i].lp_wwn == *pwwn) &&
2659 (lunm_list[i].rp_wwn == rpwwn) &&
2660 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2661 scsilun_to_int((struct scsi_lun *)&lun)))
2662 return BFA_STATUS_ENTRY_EXISTS;
2663 }
2664
2665 if (free_index > MAX_LUN_MASK_CFG)
2666 return BFA_STATUS_MAX_ENTRY_REACHED;
2667
2668 if (rp) {
2669 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2670 rp->rport_info.local_pid);
2671 lunm_list[free_index].rp_tag = rp->rport_tag;
2672 } else {
2673 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2674 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2675 }
2676
2677 lunm_list[free_index].lp_wwn = *pwwn;
2678 lunm_list[free_index].rp_wwn = rpwwn;
2679 lunm_list[free_index].lun = lun;
2680 lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2681
2682 /* set for all luns in this rp */
2683 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2684 if ((lunm_list[i].lp_wwn == *pwwn) &&
2685 (lunm_list[i].rp_wwn == rpwwn))
2686 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2687 }
2688
2689 return bfa_dconf_update(bfa);
2690}
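/*
 * Editor's usage sketch (hypothetical management-path caller of the
 * function above):
 *
 *	struct scsi_lun lun;
 *
 *	int_to_scsilun(5, &lun);
 *	switch (bfa_fcpim_lunmask_add(bfa, vf_id, &pwwn, rpwwn, lun)) {
 *	case BFA_STATUS_ENTRY_EXISTS:	   ... duplicate entry ...
 *	case BFA_STATUS_MAX_ENTRY_REACHED: ... mask table full ...
 *	default:			   ... entry persisted via dconf ...
 *	}
 *
 * Both soft-failure codes come straight from the scan above; a real
 * caller would also hold whatever lock protects the lun mask list.
 */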
2691
2692bfa_status_t
2693bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2694 wwn_t rpwwn, struct scsi_lun lun)
2695{
2696 struct bfa_lun_mask_s *lunm_list;
2697 struct bfa_rport_s *rp = NULL;
2698 struct bfa_fcs_lport_s *port = NULL;
2699 struct bfa_fcs_rport_s *rp_fcs;
2700 int i;
2701
2702 /* in min cfg lunm_list could be NULL but no commands should run. */
2703 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2704 return BFA_STATUS_FAILED;
2705
2706 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2707 bfa_trc(bfa, *pwwn);
2708 bfa_trc(bfa, rpwwn);
2709 bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2710
2711 if (*pwwn == 0) {
2712 port = bfa_fcs_lookup_port(
2713 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2714 vf_id, *pwwn);
2715 if (port) {
2716 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs ? rp_fcs->bfa_rport : NULL;
2719 }
2720 }
2721
2722 lunm_list = bfa_get_lun_mask_list(bfa);
2723 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2724 if ((lunm_list[i].lp_wwn == *pwwn) &&
2725 (lunm_list[i].rp_wwn == rpwwn) &&
2726 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2727 scsilun_to_int((struct scsi_lun *)&lun))) {
2728 lunm_list[i].lp_wwn = 0;
2729 lunm_list[i].rp_wwn = 0;
2730 int_to_scsilun(0, &lunm_list[i].lun);
2731 lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2732 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2733 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2734 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2735 }
2736 return bfa_dconf_update(bfa);
2737 }
2738 }
2739
2740 /* set for all luns in this rp */
2741 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2742 if ((lunm_list[i].lp_wwn == *pwwn) &&
2743 (lunm_list[i].rp_wwn == rpwwn))
2744 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2745 }
2746
2747 return BFA_STATUS_ENTRY_NOT_EXISTS;
2748}
2749
2750static void
2751__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2752{
2753 struct bfa_ioim_s *ioim = cbarg;
2754
2755 if (!complete) {
2756 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2757 return;
2758 }
2759
83763d59 2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2762 0, 0, NULL, 0);
2763}
2764
2765static void
2766__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2767{
2768 struct bfa_ioim_s *ioim = cbarg;
2769
2770 bfa_stats(ioim->itnim, path_tov_expired);
2771 if (!complete) {
2772 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2773 return;
2774 }
2775
83763d59 2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2778 0, 0, NULL, 0);
2779}
2780
2781static void
2782__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2783{
2784 struct bfa_ioim_s *ioim = cbarg;
2785
2786 if (!complete) {
2787 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2788 return;
2789 }
2790
83763d59 2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2793}
2794
2795static void
2796bfa_ioim_sgpg_alloced(void *cbarg)
2797{
2798 struct bfa_ioim_s *ioim = cbarg;
2799
2800 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2801 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
e3e7d3ee 2802 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2803 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2804}
2805
5fbe25c7 2806/*
2807 * Send I/O request to firmware.
2808 */
2809static bfa_boolean_t
2810bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2811{
2812 struct bfa_itnim_s *itnim = ioim->itnim;
2813 struct bfi_ioim_req_s *m;
da99dcc9 2814 static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
e3e7d3ee 2815 struct bfi_sge_s *sge, *sgpge;
2816 u32 pgdlen = 0;
2817 u32 fcp_dl;
2818 u64 addr;
2819 struct scatterlist *sg;
e3e7d3ee 2820 struct bfa_sgpg_s *sgpg;
a36c61f9 2821 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
e3e7d3ee 2822 u32 i, sge_id, pgcumsz;
f314878a 2823 enum dma_data_direction dmadir;
a36c61f9 2824
5fbe25c7 2825 /*
2826 * check for room in queue to send request now
2827 */
2828 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2829 if (!m) {
2830 bfa_stats(ioim->itnim, qwait);
2831 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2832 &ioim->iosp->reqq_wait);
2833 return BFA_FALSE;
2834 }
2835
5fbe25c7 2836 /*
2837 * build i/o request message next
2838 */
ba816ea8 2839 m->io_tag = cpu_to_be16(ioim->iotag);
a36c61f9 2840 m->rport_hdl = ioim->itnim->rport->fw_handle;
f314878a 2841 m->io_timeout = 0;
a36c61f9 2842
a36c61f9 2843 sge = &m->sges[0];
2844 sgpg = ioim->sgpg;
2845 sge_id = 0;
2846 sgpge = NULL;
2847 pgcumsz = 0;
2848 scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2849 if (i == 0) {
2850 /* build inline IO SG element */
f16a1750 2851 addr = bfa_sgaddr_le(sg_dma_address(sg));
2852 sge->sga = *(union bfi_addr_u *) &addr;
2853 pgdlen = sg_dma_len(sg);
2854 sge->sg_len = pgdlen;
2855 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
a36c61f9 2856 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2857 bfa_sge_to_be(sge);
2858 sge++;
2859 } else {
2860 if (sge_id == 0)
2861 sgpge = sgpg->sgpg->sges;
2862
f16a1750 2863 addr = bfa_sgaddr_le(sg_dma_address(sg));
2864 sgpge->sga = *(union bfi_addr_u *) &addr;
2865 sgpge->sg_len = sg_dma_len(sg);
2866 pgcumsz += sgpge->sg_len;
2867
2868 /* set flags */
2869 if (i < (ioim->nsges - 1) &&
2870 sge_id < (BFI_SGPG_DATA_SGES - 1))
2871 sgpge->flags = BFI_SGE_DATA;
2872 else if (i < (ioim->nsges - 1))
2873 sgpge->flags = BFI_SGE_DATA_CPL;
2874 else
2875 sgpge->flags = BFI_SGE_DATA_LAST;
2876
2877 bfa_sge_to_le(sgpge);
2878
2879 sgpge++;
2880 if (i == (ioim->nsges - 1)) {
2881 sgpge->flags = BFI_SGE_PGDLEN;
2882 sgpge->sga.a32.addr_lo = 0;
2883 sgpge->sga.a32.addr_hi = 0;
2884 sgpge->sg_len = pgcumsz;
2885 bfa_sge_to_le(sgpge);
2886 } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2887 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2888 sgpge->flags = BFI_SGE_LINK;
2889 sgpge->sga = sgpg->sgpg_pa;
2890 sgpge->sg_len = pgcumsz;
2891 bfa_sge_to_le(sgpge);
2892 sge_id = 0;
2893 pgcumsz = 0;
2894 }
2895 }
2896 }
2897
2898 if (ioim->nsges > BFI_SGE_INLINE) {
2899 sge->sga = ioim->sgpg->sgpg_pa;
2900 } else {
2901 sge->sga.a32.addr_lo = 0;
2902 sge->sga.a32.addr_hi = 0;
2903 }
2904 sge->sg_len = pgdlen;
2905 sge->flags = BFI_SGE_PGDLEN;
2906 bfa_sge_to_be(sge);
2907
5fbe25c7 2908 /*
2909 * set up I/O command parameters
2910 */
6a18b167 2911 m->cmnd = cmnd_z0;
2912 int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2913 dmadir = cmnd->sc_data_direction;
2914 if (dmadir == DMA_TO_DEVICE)
2915 m->cmnd.iodir = FCP_IODIR_WRITE;
2916 else if (dmadir == DMA_FROM_DEVICE)
2917 m->cmnd.iodir = FCP_IODIR_READ;
2918 else
2919 m->cmnd.iodir = FCP_IODIR_NONE;
2920
8f4bfadd 2921 m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
f314878a 2922 fcp_dl = scsi_bufflen(cmnd);
ba816ea8 2923 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
a36c61f9 2924
5fbe25c7 2925 /*
2926 * set up I/O message header
2927 */
2928 switch (m->cmnd.iodir) {
2929 case FCP_IODIR_READ:
3fd45980 2930 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2931 bfa_stats(itnim, input_reqs);
2932 ioim->itnim->stats.rd_throughput += fcp_dl;
2933 break;
2934 case FCP_IODIR_WRITE:
3fd45980 2935 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2936 bfa_stats(itnim, output_reqs);
2937 ioim->itnim->stats.wr_throughput += fcp_dl;
2938 break;
2939 case FCP_IODIR_RW:
2940 bfa_stats(itnim, input_reqs);
2941 bfa_stats(itnim, output_reqs);
2942 default:
3fd45980 2943 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2944 }
2945 if (itnim->seq_rec ||
f314878a 2946 (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
3fd45980 2947 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
a36c61f9 2948
5fbe25c7 2949 /*
2950 * queue I/O message to firmware
2951 */
3fd45980 2952 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2953 return BFA_TRUE;
2954}
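/*
 * Editor's sketch of the SG layout built above: the first BFI_SGE_INLINE
 * elements ride in the request itself and the rest spill into chained
 * SG pages, each page ending in either a PGDLEN terminator or a LINK
 * element to the next page. The page-count math, modeled standalone
 * (both constants are illustrative stand-ins, not the real BFI values):
 */
#define SGE_INLINE	2	/* stand-in for BFI_SGE_INLINE */
#define SGPG_DATA_SGES	7	/* stand-in for BFI_SGPG_DATA_SGES */

static int sgpg_npages(int nsges)
{
	int spill = nsges - SGE_INLINE;	/* elements beyond the inline set */

	if (spill <= 0)
		return 0;		/* everything fits inline */
	return (spill + SGPG_DATA_SGES - 1) / SGPG_DATA_SGES; /* ceiling */
}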
2955
5fbe25c7 2956/*
2957 * Set up any additional SG pages needed. The inline SG element is
2958 * set up at queuing time.
2959 */
2960static bfa_boolean_t
e3e7d3ee 2961bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2962{
2963 u16 nsgpgs;
2964
d4b671c5 2965 WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
a36c61f9 2966
5fbe25c7 2967 /*
2968 * allocate SG pages needed
2969 */
2970 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2971 if (!nsgpgs)
2972 return BFA_TRUE;
2973
2974 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2975 != BFA_STATUS_OK) {
2976 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2977 return BFA_FALSE;
2978 }
2979
2980 ioim->nsgpgs = nsgpgs;
e3e7d3ee 2981 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2982
2983 return BFA_TRUE;
2984}
2985
5fbe25c7 2986/*
2987 * Send I/O abort request to firmware.
2988 */
2989static bfa_boolean_t
2990bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2991{
2992 struct bfi_ioim_abort_req_s *m;
2993 enum bfi_ioim_h2i msgop;
2994
5fbe25c7 2995 /*
2996 * check for room in queue to send request now
2997 */
2998 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2999 if (!m)
3000 return BFA_FALSE;
3001
5fbe25c7 3002 /*
3003 * build i/o request message next
3004 */
3005 if (ioim->iosp->abort_explicit)
3006 msgop = BFI_IOIM_H2I_IOABORT_REQ;
3007 else
3008 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
3009
3fd45980 3010 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
ba816ea8 3011 m->io_tag = cpu_to_be16(ioim->iotag);
3012 m->abort_tag = ++ioim->abort_tag;
3013
5fbe25c7 3014 /*
3015 * queue I/O message to firmware
3016 */
3fd45980 3017 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
3018 return BFA_TRUE;
3019}
3020
5fbe25c7 3021/*
3022 * Call to resume any I/O requests waiting for room in request queue.
3023 */
3024static void
3025bfa_ioim_qresume(void *cbarg)
3026{
3027 struct bfa_ioim_s *ioim = cbarg;
3028
3029 bfa_stats(ioim->itnim, qresumes);
3030 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
3031}
3032
3033
3034static void
3035bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
3036{
5fbe25c7 3037 /*
3038 * Move IO from itnim queue to fcpim global queue since itnim will be
3039 * freed.
3040 */
3041 list_del(&ioim->qe);
3042 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3043
3044 if (!ioim->iosp->tskim) {
3045 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
3046 bfa_cb_dequeue(&ioim->hcb_qe);
3047 list_del(&ioim->qe);
3048 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
3049 }
3050 bfa_itnim_iodone(ioim->itnim);
3051 } else
f7f73812 3052 bfa_wc_down(&ioim->iosp->tskim->wc);
3053}
3054
3055static bfa_boolean_t
3056bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
3057{
3058 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
3059 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
3060 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
3061 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
3062 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
3063 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
3064 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
3065 return BFA_FALSE;
3066
3067 return BFA_TRUE;
3068}
3069
3070void
3071bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
3072{
5fbe25c7 3073 /*
3074 * If the path tov timer expired, fail back with PATHTOV status; these
3075 * IO requests are not normally retried by the IO stack.
3076 *
3077 * Otherwise the device came back online; fail it with normal failed
3078 * status so that the IO stack retries these failed IO requests.
3079 */
3080 if (iotov)
3081 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
3082 else {
3083 ioim->io_cbfn = __bfa_cb_ioim_failed;
3084 bfa_stats(ioim->itnim, iocom_nexus_abort);
3085 }
3086 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
3087
5fbe25c7 3088 /*
3089 * Move IO to fcpim global queue since itnim will be
3090 * freed.
3091 */
3092 list_del(&ioim->qe);
3093 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3094}
3095
3096
5fbe25c7 3097/*
3098 * Memory allocation and initialization.
3099 */
3100void
4507025d 3101bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
3102{
3103 struct bfa_ioim_s *ioim;
4507025d 3104 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
3105 struct bfa_ioim_sp_s *iosp;
3106 u16 i;
a36c61f9 3107
5fbe25c7 3108 /*
3109 * claim memory first
3110 */
4507025d 3111 ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
a36c61f9 3112 fcpim->ioim_arr = ioim;
4507025d 3113 bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
a36c61f9 3114
4507025d 3115 iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
a36c61f9 3116 fcpim->ioim_sp_arr = iosp;
4507025d 3117 bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
a36c61f9 3118
5fbe25c7 3119 /*
3120 * Initialize ioim free queues
3121 */
3122 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
3123 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
3124
3125 for (i = 0; i < fcpim->fcp->num_ioim_reqs;
3126 i++, ioim++, iosp++) {
3127 /*
3128 * initialize IOIM
3129 */
6a18b167 3130 memset(ioim, 0, sizeof(struct bfa_ioim_s));
3131 ioim->iotag = i;
3132 ioim->bfa = fcpim->bfa;
3133 ioim->fcpim = fcpim;
3134 ioim->iosp = iosp;
83763d59 3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3136 INIT_LIST_HEAD(&ioim->sgpg_q);
3137 bfa_reqq_winit(&ioim->iosp->reqq_wait,
3138 bfa_ioim_qresume, ioim);
3139 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
3140 bfa_ioim_sgpg_alloced, ioim);
3141 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
3142 }
3143}
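/*
 * Editor's note: bfa_mem_kva_curp() is used above as a bump pointer,
 * carving the ioim and iosp arrays back-to-back out of one
 * preallocated KVA block. The same pattern in standalone form (a
 * sketch, not the BFA memory macros):
 */
#include <stddef.h>
#include <stdint.h>

struct kva_block {
	uint8_t *curp;		/* next free byte in the block */
};

static void *kva_carve(struct kva_block *blk, size_t nbytes)
{
	void *p = blk->curp;

	blk->curp += nbytes;	/* caller sized the block up front */
	return p;
}

/*
 * usage:
 *	ioim_arr = kva_carve(&blk, num_reqs * sizeof(struct bfa_ioim_s));
 *	iosp_arr = kva_carve(&blk, num_reqs * sizeof(struct bfa_ioim_sp_s));
 */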
3144
3145void
3146bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3147{
e2187d7f 3148 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3149 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
3150 struct bfa_ioim_s *ioim;
3151 u16 iotag;
3152 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
3153
ba816ea8 3154 iotag = be16_to_cpu(rsp->io_tag);
3155
3156 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
d4b671c5 3157 WARN_ON(ioim->iotag != iotag);
3158
3159 bfa_trc(ioim->bfa, ioim->iotag);
3160 bfa_trc(ioim->bfa, rsp->io_status);
3161 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
3162
3163 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
6a18b167 3164 ioim->iosp->comp_rspmsg = *m;
3165
3166 switch (rsp->io_status) {
3167 case BFI_IOIM_STS_OK:
3168 bfa_stats(ioim->itnim, iocomp_ok);
3169 if (rsp->reuse_io_tag == 0)
3170 evt = BFA_IOIM_SM_DONE;
3171 else
3172 evt = BFA_IOIM_SM_COMP;
83763d59 3173 ioim->proc_rsp_data(ioim);
3174 break;
3175
3176 case BFI_IOIM_STS_TIMEDOUT:
3177 bfa_stats(ioim->itnim, iocomp_timedout);
3178 case BFI_IOIM_STS_ABORTED:
3179 rsp->io_status = BFI_IOIM_STS_ABORTED;
3180 bfa_stats(ioim->itnim, iocomp_aborted);
3181 if (rsp->reuse_io_tag == 0)
3182 evt = BFA_IOIM_SM_DONE;
3183 else
3184 evt = BFA_IOIM_SM_COMP;
3185 break;
3186
3187 case BFI_IOIM_STS_PROTO_ERR:
3188 bfa_stats(ioim->itnim, iocom_proto_err);
d4b671c5 3189 WARN_ON(!rsp->reuse_io_tag);
3190 evt = BFA_IOIM_SM_COMP;
3191 break;
3192
3193 case BFI_IOIM_STS_SQER_NEEDED:
3194 bfa_stats(ioim->itnim, iocom_sqer_needed);
d4b671c5 3195 WARN_ON(rsp->reuse_io_tag != 0);
3196 evt = BFA_IOIM_SM_SQRETRY;
3197 break;
3198
3199 case BFI_IOIM_STS_RES_FREE:
3200 bfa_stats(ioim->itnim, iocom_res_free);
3201 evt = BFA_IOIM_SM_FREE;
3202 break;
3203
3204 case BFI_IOIM_STS_HOST_ABORTED:
3205 bfa_stats(ioim->itnim, iocom_hostabrts);
3206 if (rsp->abort_tag != ioim->abort_tag) {
3207 bfa_trc(ioim->bfa, rsp->abort_tag);
3208 bfa_trc(ioim->bfa, ioim->abort_tag);
83763d59 3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3210 return;
3211 }
3212
3213 if (rsp->reuse_io_tag)
3214 evt = BFA_IOIM_SM_ABORT_COMP;
3215 else
3216 evt = BFA_IOIM_SM_ABORT_DONE;
3217 break;
3218
3219 case BFI_IOIM_STS_UTAG:
3220 bfa_stats(ioim->itnim, iocom_utags);
3221 evt = BFA_IOIM_SM_COMP_UTAG;
3222 break;
3223
3224 default:
d4b671c5 3225 WARN_ON(1);
3226 }
3227
83763d59 3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3229 bfa_sm_send_event(ioim, evt);
3230}
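/*
 * Editor's sketch: stripped of statistics, trace and abort-tag checks,
 * the ISR above is a mapping from firmware completion status to a
 * state-machine event, with reuse_io_tag selecting COMP (tag reusable
 * immediately) versus DONE (tag freed later by firmware). A condensed
 * model using the driver's own enum values; the host-abort case and
 * its tag matching are elided, and the default arm is a sketch-only
 * fallback where the real code does WARN_ON(1):
 */
static enum bfa_ioim_event
ioim_status_to_event(u8 io_status, u8 reuse_io_tag)
{
	switch (io_status) {
	case BFI_IOIM_STS_OK:
	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		return reuse_io_tag ? BFA_IOIM_SM_COMP : BFA_IOIM_SM_DONE;
	case BFI_IOIM_STS_PROTO_ERR:
		return BFA_IOIM_SM_COMP;	/* tag is always reused */
	case BFI_IOIM_STS_SQER_NEEDED:
		return BFA_IOIM_SM_SQRETRY;	/* retry with a new tag */
	case BFI_IOIM_STS_RES_FREE:
		return BFA_IOIM_SM_FREE;
	case BFI_IOIM_STS_UTAG:
		return BFA_IOIM_SM_COMP_UTAG;
	default:
		return BFA_IOIM_SM_HWFAIL;	/* sketch fallback only */
	}
}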
3231
3232void
3233bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3234{
e2187d7f 3235 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3236 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
3237 struct bfa_ioim_s *ioim;
3238 u16 iotag;
3239
ba816ea8 3240 iotag = be16_to_cpu(rsp->io_tag);
3241
3242 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
d4b671c5 3243 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
a36c61f9 3244
a36c61f9 3245 bfa_ioim_cb_profile_comp(fcpim, ioim);
3246
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3250 }
3251
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
3256}
3257
5fbe25c7 3258/*
3259 * Called by itnim to clean up IO while going offline.
3260 */
3261void
3262bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
3263{
3264 bfa_trc(ioim->bfa, ioim->iotag);
3265 bfa_stats(ioim->itnim, io_cleanups);
3266
3267 ioim->iosp->tskim = NULL;
3268 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
3269}
3270
3271void
3272bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
3273{
3274 bfa_trc(ioim->bfa, ioim->iotag);
3275 bfa_stats(ioim->itnim, io_tmaborts);
3276
3277 ioim->iosp->tskim = tskim;
3278 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
3279}
3280
5fbe25c7 3281/*
3282 * IOC failure handling.
3283 */
3284void
3285bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
3286{
3287 bfa_trc(ioim->bfa, ioim->iotag);
3288 bfa_stats(ioim->itnim, io_iocdowns);
3289 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
3290}
3291
5fbe25c7 3292/*
3293 * IO offline TOV popped. Fail the pending IO.
3294 */
3295void
3296bfa_ioim_tov(struct bfa_ioim_s *ioim)
3297{
3298 bfa_trc(ioim->bfa, ioim->iotag);
3299 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
3300}
3301
3302
5fbe25c7 3303/*
3304 * Allocate IOIM resource for initiator mode I/O request.
3305 */
3306struct bfa_ioim_s *
3307bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
3308 struct bfa_itnim_s *itnim, u16 nsges)
3309{
e2187d7f 3310 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
a36c61f9 3311 struct bfa_ioim_s *ioim;
e2187d7f 3312 struct bfa_iotag_s *iotag = NULL;
a36c61f9 3313
5fbe25c7 3314 /*
3315 * allocate IOIM resource
3316 */
3317 bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
3318 if (!iotag) {
3319 bfa_stats(itnim, no_iotags);
3320 return NULL;
3321 }
3322
3323 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
3324
3325 ioim->dio = dio;
3326 ioim->itnim = itnim;
3327 ioim->nsges = nsges;
3328 ioim->nsgpgs = 0;
3329
3330 bfa_stats(itnim, total_ios);
3331 fcpim->ios_active++;
3332
3333 list_add_tail(&ioim->qe, &itnim->io_q);
3334
3335 return ioim;
3336}
3337
3338void
3339bfa_ioim_free(struct bfa_ioim_s *ioim)
3340{
3341 struct bfa_fcpim_s *fcpim = ioim->fcpim;
3342 struct bfa_iotag_s *iotag;
a36c61f9 3343
3344 if (ioim->nsgpgs > 0)
3345 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
3346
3347 bfa_stats(ioim->itnim, io_comps);
3348 fcpim->ios_active--;
3349
15821f05 3350 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
3351
3352 WARN_ON(!(ioim->iotag <
3353 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
3354 iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
3355
3356 if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
3357 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
3358 else
3359 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
3360
a36c61f9 3361 list_del(&ioim->qe);
3362}
3363
3364void
3365bfa_ioim_start(struct bfa_ioim_s *ioim)
3366{
3367 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
3368 struct bfa_lps_s *lps;
3369 enum bfa_ioim_lm_status status;
3370 struct scsi_lun scsilun;
3371
3372 if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
3373 lps = BFA_IOIM_TO_LPS(ioim);
3374 int_to_scsilun(cmnd->device->lun, &scsilun);
3375 status = bfa_ioim_lm_check(ioim, lps,
3376 ioim->itnim->rport, scsilun);
3377 if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
3378 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
3379 bfa_stats(ioim->itnim, lm_lun_not_rdy);
3380 return;
3381 }
3382
3383 if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
3384 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
3385 bfa_stats(ioim->itnim, lm_lun_not_sup);
3386 return;
3387 }
3388
3389 if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
3390 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
3391 bfa_stats(ioim->itnim, lm_rpl_data_changed);
3392 return;
3393 }
3394 }
3395
3396 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3397
5fbe25c7 3398 /*
3399 * Obtain the queue over which this request has to be issued
3400 */
3401 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
f314878a 3402 BFA_FALSE : bfa_itnim_get_reqq(ioim);
3403
3404 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3405}
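/*
 * Editor's usage sketch (hypothetical initiator-path caller): the IO
 * lifecycle exposed above is alloc -> start, with abort as the only
 * external cancel:
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, scsi_sg_count(cmnd));
 *	if (!ioim)
 *		return ...;	   IO tags exhausted, retry later
 *	bfa_ioim_start(ioim);
 *	...
 *	if (bfa_ioim_abort(ioim) != BFA_STATUS_OK)
 *		...;		   no longer abortable, completion imminent
 */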
3406
5fbe25c7 3407/*
3408 * Driver I/O abort request.
3409 */
3410bfa_status_t
3411bfa_ioim_abort(struct bfa_ioim_s *ioim)
3412{
3413
3414 bfa_trc(ioim->bfa, ioim->iotag);
3415
3416 if (!bfa_ioim_is_abortable(ioim))
3417 return BFA_STATUS_FAILED;
3418
3419 bfa_stats(ioim->itnim, io_aborts);
3420 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3421
3422 return BFA_STATUS_OK;
3423}
3424
5fbe25c7 3425/*
3426 * BFA TSKIM state machine functions
3427 */
3428
5fbe25c7 3429/*
da99dcc9 3430 * Task management command beginning state.
3431 */
3432static void
3433bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3434{
3435 bfa_trc(tskim->bfa, event);
3436
3437 switch (event) {
3438 case BFA_TSKIM_SM_START:
3439 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3440 bfa_tskim_gather_ios(tskim);
3441
5fbe25c7 3442 /*
3443 * If device is offline, do not send TM on wire. Just cleanup
3444 * any pending IO requests and complete TM request.
3445 */
3446 if (!bfa_itnim_is_online(tskim->itnim)) {
3447 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3448 tskim->tsk_status = BFI_TSKIM_STS_OK;
3449 bfa_tskim_cleanup_ios(tskim);
3450 return;
3451 }
3452
3453 if (!bfa_tskim_send(tskim)) {
3454 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3455 bfa_stats(tskim->itnim, tm_qwait);
3456 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3457 &tskim->reqq_wait);
3458 }
3459 break;
3460
3461 default:
3462 bfa_sm_fault(tskim->bfa, event);
3463 }
3464}
3465
5fbe25c7 3466/*
3467 * TM command is active, awaiting completion from firmware to
3468 * cleanup IO requests in TM scope.
3469 */
3470static void
3471bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3472{
3473 bfa_trc(tskim->bfa, event);
3474
3475 switch (event) {
3476 case BFA_TSKIM_SM_DONE:
3477 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3478 bfa_tskim_cleanup_ios(tskim);
3479 break;
3480
3481 case BFA_TSKIM_SM_CLEANUP:
3482 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3483 if (!bfa_tskim_send_abort(tskim)) {
3484 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3485 bfa_stats(tskim->itnim, tm_qwait);
3486 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3487 &tskim->reqq_wait);
3488 }
3489 break;
3490
3491 case BFA_TSKIM_SM_HWFAIL:
3492 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3493 bfa_tskim_iocdisable_ios(tskim);
3494 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3495 break;
3496
3497 default:
3498 bfa_sm_fault(tskim->bfa, event);
3499 }
3500}
3501
5fbe25c7 3502/*
3503 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3504 * completion event from firmware.
3505 */
3506static void
3507bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3508{
3509 bfa_trc(tskim->bfa, event);
3510
3511 switch (event) {
3512 case BFA_TSKIM_SM_DONE:
5fbe25c7 3513 /*
3514 * Ignore and wait for ABORT completion from firmware.
3515 */
3516 break;
3517
3518 case BFA_TSKIM_SM_CLEANUP_DONE:
3519 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3520 bfa_tskim_cleanup_ios(tskim);
3521 break;
3522
3523 case BFA_TSKIM_SM_HWFAIL:
3524 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3525 bfa_tskim_iocdisable_ios(tskim);
3526 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3527 break;
3528
3529 default:
3530 bfa_sm_fault(tskim->bfa, event);
3531 }
3532}
3533
3534static void
3535bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3536{
3537 bfa_trc(tskim->bfa, event);
3538
3539 switch (event) {
3540 case BFA_TSKIM_SM_IOS_DONE:
3541 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3542 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3543 break;
3544
3545 case BFA_TSKIM_SM_CLEANUP:
5fbe25c7 3546 /*
3547 * Ignore, TM command completed on wire.
3548 * Notify TM completion on IO cleanup completion.
3549 */
3550 break;
3551
3552 case BFA_TSKIM_SM_HWFAIL:
3553 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3554 bfa_tskim_iocdisable_ios(tskim);
3555 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3556 break;
3557
3558 default:
3559 bfa_sm_fault(tskim->bfa, event);
3560 }
3561}
3562
5fbe25c7 3563/*
da99dcc9 3564 * Task management command is waiting for room in request CQ
3565 */
3566static void
3567bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3568{
3569 bfa_trc(tskim->bfa, event);
3570
3571 switch (event) {
3572 case BFA_TSKIM_SM_QRESUME:
3573 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3574 bfa_tskim_send(tskim);
3575 break;
3576
3577 case BFA_TSKIM_SM_CLEANUP:
5fbe25c7 3578 /*
3579 * No need to send TM on wire since ITN is offline.
3580 */
3581 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3582 bfa_reqq_wcancel(&tskim->reqq_wait);
3583 bfa_tskim_cleanup_ios(tskim);
3584 break;
3585
3586 case BFA_TSKIM_SM_HWFAIL:
3587 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3588 bfa_reqq_wcancel(&tskim->reqq_wait);
3589 bfa_tskim_iocdisable_ios(tskim);
3590 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3591 break;
3592
3593 default:
3594 bfa_sm_fault(tskim->bfa, event);
3595 }
3596}
3597
5fbe25c7 3598/*
3599 * Task management command is active, awaiting room in request CQ
3600 * to send clean up request.
3601 */
3602static void
3603bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3604 enum bfa_tskim_event event)
3605{
3606 bfa_trc(tskim->bfa, event);
3607
3608 switch (event) {
3609 case BFA_TSKIM_SM_DONE:
3610 bfa_reqq_wcancel(&tskim->reqq_wait);
5fbe25c7 3611 /*
3612 * Fall through !!!
3613 */
3614 case BFA_TSKIM_SM_QRESUME:
3615 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3616 bfa_tskim_send_abort(tskim);
3617 break;
3618
3619 case BFA_TSKIM_SM_HWFAIL:
3620 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3621 bfa_reqq_wcancel(&tskim->reqq_wait);
3622 bfa_tskim_iocdisable_ios(tskim);
3623 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3624 break;
3625
3626 default:
3627 bfa_sm_fault(tskim->bfa, event);
3628 }
3629}
3630
5fbe25c7 3631/*
da99dcc9 3632 * BFA callback is pending
3633 */
3634static void
3635bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3636{
3637 bfa_trc(tskim->bfa, event);
3638
3639 switch (event) {
3640 case BFA_TSKIM_SM_HCB:
3641 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3642 bfa_tskim_free(tskim);
3643 break;
3644
3645 case BFA_TSKIM_SM_CLEANUP:
3646 bfa_tskim_notify_comp(tskim);
3647 break;
3648
3649 case BFA_TSKIM_SM_HWFAIL:
3650 break;
3651
3652 default:
3653 bfa_sm_fault(tskim->bfa, event);
3654 }
3655}
3656
3657static void
3658__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3659{
3660 struct bfa_tskim_s *tskim = cbarg;
3661
3662 if (!complete) {
3663 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3664 return;
3665 }
3666
3667 bfa_stats(tskim->itnim, tm_success);
3668 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3669}
3670
3671static void
3672__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3673{
3674 struct bfa_tskim_s *tskim = cbarg;
3675
3676 if (!complete) {
3677 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3678 return;
3679 }
3680
3681 bfa_stats(tskim->itnim, tm_failures);
3682 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3683 BFI_TSKIM_STS_FAILED);
3684}
3685
da99dcc9 3686static bfa_boolean_t
f314878a 3687bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3688{
3689 switch (tskim->tm_cmnd) {
3690 case FCP_TM_TARGET_RESET:
3691 return BFA_TRUE;
3692
3693 case FCP_TM_ABORT_TASK_SET:
3694 case FCP_TM_CLEAR_TASK_SET:
3695 case FCP_TM_LUN_RESET:
3696 case FCP_TM_CLEAR_ACA:
da99dcc9 3697 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3698
3699 default:
d4b671c5 3700 WARN_ON(1);
3701 }
3702
3703 return BFA_FALSE;
3704}
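/*
 * Editor's note: TM scope matching above compares the 8-byte FCP LUN
 * verbatim with memcmp(). A standalone equivalent (lun8 mirrors the
 * opaque 8-byte layout of struct scsi_lun; a sketch, not the SCSI
 * midlayer type):
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct lun8 { uint8_t scsi_lun[8]; };

static bool lun_match(const struct lun8 *a, const struct lun8 *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;	/* byte-for-byte equality */
}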
3705
5fbe25c7 3706/*
da99dcc9 3707 * Gather affected IO requests and task management commands.
3708 */
3709static void
3710bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3711{
3712 struct bfa_itnim_s *itnim = tskim->itnim;
3713 struct bfa_ioim_s *ioim;
3714 struct list_head *qe, *qen;
3715 struct scsi_cmnd *cmnd;
3716 struct scsi_lun scsilun;
3717
3718 INIT_LIST_HEAD(&tskim->io_q);
3719
5fbe25c7 3720 /*
3721 * Gather any active IO requests first.
3722 */
3723 list_for_each_safe(qe, qen, &itnim->io_q) {
3724 ioim = (struct bfa_ioim_s *) qe;
3725 cmnd = (struct scsi_cmnd *) ioim->dio;
3726 int_to_scsilun(cmnd->device->lun, &scsilun);
3727 if (bfa_tskim_match_scope(tskim, scsilun)) {
3728 list_del(&ioim->qe);
3729 list_add_tail(&ioim->qe, &tskim->io_q);
3730 }
3731 }
3732
5fbe25c7 3733 /*
3734 * Failback any pending IO requests immediately.
3735 */
3736 list_for_each_safe(qe, qen, &itnim->pending_q) {
3737 ioim = (struct bfa_ioim_s *) qe;
3738 cmnd = (struct scsi_cmnd *) ioim->dio;
3739 int_to_scsilun(cmnd->device->lun, &scsilun);
3740 if (bfa_tskim_match_scope(tskim, scsilun)) {
3741 list_del(&ioim->qe);
3742 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3743 bfa_ioim_tov(ioim);
3744 }
3745 }
3746}
3747
5fbe25c7 3748/*
da99dcc9 3749 * IO cleanup completion
3750 */
3751static void
3752bfa_tskim_cleanp_comp(void *tskim_cbarg)
3753{
3754 struct bfa_tskim_s *tskim = tskim_cbarg;
3755
3756 bfa_stats(tskim->itnim, tm_io_comps);
3757 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3758}
3759
5fbe25c7 3760/*
da99dcc9 3761 * Clean up the IO requests gathered under this TM command.
3762 */
3763static void
3764bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3765{
3766 struct bfa_ioim_s *ioim;
3767 struct list_head *qe, *qen;
3768
3769 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3770
3771 list_for_each_safe(qe, qen, &tskim->io_q) {
3772 ioim = (struct bfa_ioim_s *) qe;
3773 bfa_wc_up(&tskim->wc);
3774 bfa_ioim_cleanup_tm(ioim, tskim);
3775 }
3776
3777 bfa_wc_wait(&tskim->wc);
3778}
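
/*
 * The walking counter (bfa_wc) used above is a recurring idiom in this
 * driver: bfa_wc_init() starts the count at one, each outstanding I/O
 * takes a reference via bfa_wc_up(), and bfa_wc_wait() drops the
 * initial reference, so bfa_tskim_cleanp_comp() fires exactly once,
 * after the last bfa_tskim_iodone() -> bfa_wc_down() call, even when
 * io_q is empty. A minimal sketch of the pattern (illustrative only;
 * start_async_op() is a placeholder):
 *
 *	bfa_wc_init(&wc, done_cb, arg);		// count = 1
 *	list_for_each_safe(qe, qen, &q) {
 *		bfa_wc_up(&wc);			// one ref per async op
 *		start_async_op(qe);		// completion calls bfa_wc_down()
 *	}
 *	bfa_wc_wait(&wc);	// drop initial ref; done_cb runs at zero
 */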

/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
		    bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
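
/*
 * bfa_reqq_next() returns NULL when the request queue to the firmware
 * is full; the TSKIM state machine (earlier in this file) then parks
 * the command on tskim->reqq_wait so bfa_tskim_qresume() can retry once
 * space frees up. A hedged sketch of that caller-side idiom:
 *
 *	if (!bfa_tskim_send(tskim)) {
 *		// no room in the queue: wait, then retry on QRESUME
 *		bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
 *			      &tskim->reqq_wait);
 *	}
 */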

/*
 * Send an abort request to the firmware to clean up an active TM command.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build abort request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
		    bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue abort message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Resume a task management command that is waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
			       tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
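
/*
 * Note the KVA carving idiom in bfa_tskim_attach(): bfa_mem_kva_curp()
 * acts as a bump pointer into one pre-sized KVA block. The array base
 * is read from the current pointer and, after initializing
 * num_tskim_reqs elements, the pointer is stored back just past the
 * array. Conceptually (a sketch; the real code advances element by
 * element):
 *
 *	base = bfa_mem_kva_curp(fcp);
 *	bfa_mem_kva_curp(fcp) = (u8 *) (base + fcpim->num_tskim_reqs);
 */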

void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
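
/*
 * Response demultiplexing relies on the tag round-trip: the tsk_tag
 * sent in bfa_tskim_send() is echoed back by the firmware, and the
 * BFA_TSKIM_FROM_TAG() lookup (defined in a header) is, to the best of
 * this reading, a masked array index:
 *
 *	tskim = &fcpim->tskim_arr[tsk_tag & (fcpim->num_tskim_reqs - 1)];
 *
 * which is why the WARN_ON() above cross-checks the stored tag.
 */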


struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	LUN, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
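
/*
 * Typical call sequence from the driver layer (a hedged sketch; locking
 * and error handling are elided, and the status value returned on pool
 * exhaustion is illustrative):
 *
 *	struct bfa_tskim_s *tskim;
 *	struct scsi_lun lun;
 *
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (!tskim)				// free pool exhausted
 *		return BFA_STATUS_DEVBUSY;
 *	int_to_scsilun(cmnd->device->lun, &lun);
 *	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 60);
 *	// result is reported later through bfa_cb_tskim_done()
 */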

void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
		list_add_tail(qe, &fcpim->tskim_unused_q);
	}
}
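
/*
 * Worked example for the reconfiguration above (illustrative numbers):
 * with num_tskim_reqs == 128 provisioned but the firmware reporting
 * num_tskim_fw == 64, the loop moves 128 - 64 = 64 elements from
 * tskim_free_q to tskim_unused_q, so bfa_tskim_alloc() can never hand
 * them out.
 */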

/* BFA FCP module - parent module for fcpim */

BFA_MODULE(fcp);

static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 nsegs, idx, per_seg_ios, num_io_req;
	u32 km_len = 0;

	/*
	 * Zero is an allowed config value for num_ioim_reqs and
	 * num_fwtio_reqs; clamp them only when they are non-zero.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
					  per_seg_ios * BFI_IOIM_SNSLEN);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					  num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
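
/*
 * Worked example of the DMA sizing above (illustrative): each I/O needs
 * BFI_IOIM_SNSLEN bytes of sense buffer, and one DMA segment holds
 * per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN) of them. Every full
 * segment therefore gets per_seg_ios * BFI_IOIM_SNSLEN bytes, and the
 * final segment gets the remainder, num_io_req * BFI_IOIM_SNSLEN. The
 * segment count is, roughly (a hedged reading of BFI_MEM_DMA_NSEGS):
 *
 *	nsegs = DIV_ROUND_UP(num_io_req, per_seg_ios);
 */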

static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	       struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 idx, nsegs, num_io_req;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Set up the pool of snsbase addresses that is passed to the
	 * firmware as part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}

static void
bfa_fcp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcp_start(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/*
	 * bfa_init() with flash read is complete. Now invalidate the
	 * stale content of the LUN mask (unit attention, rp tag and
	 * lp tag).
	 */
	bfa_ioim_lm_init(fcp->bfa);
}

static void
bfa_fcp_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/* Enqueue unused ioim resources to free_q */
	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);

	bfa_fcpim_iocdisable(fcp);
}

void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
{
	struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
		list_add_tail(qe, &mod->iotag_unused_q);
	}
}

void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
	       void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}

/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itn_s *itn;

	msg.msg = m;
	itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

	if (itn->isr)
		itn->isr(bfa, m);
	else
		WARN_ON(1);
}
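
/*
 * Usage sketch for the per-rport ISR hook (hedged; the handler name
 * below is hypothetical): a module registers its handler once at rport
 * creation, and bfa_itn_isr() then dispatches firmware messages for
 * that rport by bfa_handle:
 *
 *	bfa_itn_create(bfa, rport, my_itn_isr_handler);
 *	// firmware messages for this rport now land in the handler
 */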

void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16 num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}
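
/*
 * The single iotag array is partitioned by index: tags
 * [0, num_ioim_reqs) land on iotag_ioim_free_q for initiator I/O, and
 * tags [num_ioim_reqs, num_io_req) on iotag_tio_free_q for target I/O.
 * E.g. (illustrative numbers) num_ioim_reqs == 2000 with
 * num_fwtio_reqs == 0 leaves the tio free list empty.
 */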