/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);
BFA_MODULE(fcpim);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
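/*
 * Note: the tag-to-index mapping above relies on num_itnims being a
 * power of two, so that masking the tag with (num_itnims - 1) always
 * yields a valid index into itnim_arr.
 */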

#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim) do { \
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim); \
	list_del(&(__itnim)->qe); \
	WARN_ON(!list_empty(&(__itnim)->io_q)); \
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
	WARN_ON(!list_empty(&(__itnim)->pending_q)); \
} while (0)

#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do { \
	list_del(&(__ioim)->qe); \
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_comp) \
		(__fcpim)->profile_comp(__ioim); \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_start) \
		(__fcpim)->profile_start(__ioim); \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};


/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
	bfa_tskim_notify_comp(__tskim); \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do { \
	if ((__tskim)->notify) \
		bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)


enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void	bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void	bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void	bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void	__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void	bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov(void *itnim_arg);
static void	bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void	bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void	bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);
static void	bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
					enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void		__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t	bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void	__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
					struct scsi_lun lun);
static void	bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void	bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void	bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void	bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void	bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void	bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	bfa_itnim_meminfo(cfg, km_len, dm_len);

	/*
	 * IO memory
	 */
	if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	*km_len += cfg->fwcfg.num_ioim_reqs *
		(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
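/*
 * Note on the accounting above: *km_len grows by the host (kernel) memory
 * needed per request (the ioim/ioim_sp pair and the tskim structures),
 * while *dm_len grows by the DMA-able memory needed for the per-IO sense
 * buffers (BFI_IOIM_SNSLEN bytes each).
 */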


static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}

static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

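/*
 * Set the path timeout. The caller supplies seconds; the value is stored
 * internally in milliseconds, clamped to BFA_FCPIM_PATHTOV_MAX, and
 * bfa_fcpim_path_tov_get() converts it back to seconds (rounding down).
 */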
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->path_tov / 1000;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */
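/*
 * A sketch of the normal itnim lifecycle, as implemented by the handlers
 * below: uninit -CREATE-> created -ONLINE-> fwcreate (or fwcreate_qfull)
 * -FWRSP-> online -OFFLINE-> cleanup_offline -CLEANUP-> fwdelete (or
 * fwdelete_qfull) -FWRSP-> offline, with DELETE and HWFAIL events handled
 * from most states.
 */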

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO requests in the pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
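	/*
	 * itnim->wc is a waiting counter: bfa_wc_up() below is called once
	 * per IO/TM queued for cleanup, bfa_itnim_iodone()/bfa_itnim_tskdone()
	 * decrement it as each completes, and once it drains to zero
	 * bfa_itnim_cleanp_comp() posts the CLEANUP event.
	 */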

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in the request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
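
/*
 * Note: attach carves the itnim array out of the kva block supplied in
 * minfo and then advances bfa_meminfo_kva() past it, so the next module's
 * attach continues from the first unused byte.
 */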

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
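
/*
 * Request-queue pattern used by the send functions here: bfa_reqq_next()
 * returns NULL when the request queue is full, in which case the caller
 * parks itself on reqq_wait and the state machine moves to a *_qfull
 * state until the QRESUME event (posted by bfa_itnim_qresume()) retries
 * the send.
 */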

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
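
/*
 * A note on the iotov flag above (inferred from the two call sites below):
 * BFA_FALSE is passed from bfa_itnim_iotov_online() when the itnim comes
 * back online, BFA_TRUE from bfa_itnim_iotov_cleanup() when the IO TOV has
 * expired and the delayed IOs must be failed back.
 */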

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop the IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
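
/*
 * The per-bucket latency minimums are re-seeded with ~0 (all ones) so that
 * the first sample recorded after a clear always becomes the new minimum.
 */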

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
				ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
				ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for room in the request CQ.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being aborted, waiting for room in the request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being cleaned up, waiting for room in the request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
				ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is completed, waiting for resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}


static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 * (a negative residue indicates an overrun)
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			m->scsi_status, sns_len, snsinfo, residue);
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/*
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
2122 scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2123 if (i == 0) {
2124 /* build inline IO SG element */
2125 addr = bfa_sgaddr_le(sg_dma_address(sg));
2126 sge->sga = *(union bfi_addr_u *) &addr;
2127 pgdlen = sg_dma_len(sg);
2128 sge->sg_len = pgdlen;
2129 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2130 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2131 bfa_sge_to_be(sge);
2132 sge++;
2133 } else {
2134 if (sge_id == 0)
2135 sgpge = sgpg->sgpg->sges;
2136
2137 addr = bfa_sgaddr_le(sg_dma_address(sg));
2138 sgpge->sga = *(union bfi_addr_u *) &addr;
2139 sgpge->sg_len = sg_dma_len(sg);
2140 pgcumsz += sgpge->sg_len;
2141
2142 /* set flags */
2143 if (i < (ioim->nsges - 1) &&
2144 sge_id < (BFI_SGPG_DATA_SGES - 1))
2145 sgpge->flags = BFI_SGE_DATA;
2146 else if (i < (ioim->nsges - 1))
2147 sgpge->flags = BFI_SGE_DATA_CPL;
2148 else
2149 sgpge->flags = BFI_SGE_DATA_LAST;
2150
2151 bfa_sge_to_le(sgpge);
2152
2153 sgpge++;
2154 if (i == (ioim->nsges - 1)) {
2155 sgpge->flags = BFI_SGE_PGDLEN;
2156 sgpge->sga.a32.addr_lo = 0;
2157 sgpge->sga.a32.addr_hi = 0;
2158 sgpge->sg_len = pgcumsz;
2159 bfa_sge_to_le(sgpge);
2160 } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2161 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2162 sgpge->flags = BFI_SGE_LINK;
2163 sgpge->sga = sgpg->sgpg_pa;
2164 sgpge->sg_len = pgcumsz;
2165 bfa_sge_to_le(sgpge);
2166 sge_id = 0;
2167 pgcumsz = 0;
2168 }
2169 }
2170 }
2171
2172 if (ioim->nsges > BFI_SGE_INLINE) {
2173 sge->sga = ioim->sgpg->sgpg_pa;
2174 } else {
2175 sge->sga.a32.addr_lo = 0;
2176 sge->sga.a32.addr_hi = 0;
2177 }
2178 sge->sg_len = pgdlen;
2179 sge->flags = BFI_SGE_PGDLEN;
2180 bfa_sge_to_be(sge);
2181
2182 /*
2183 * set up I/O command parameters
2184 */
2185 m->cmnd = cmnd_z0;
2186 int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2187 dmadir = cmnd->sc_data_direction;
2188 if (dmadir == DMA_TO_DEVICE)
2189 m->cmnd.iodir = FCP_IODIR_WRITE;
2190 else if (dmadir == DMA_FROM_DEVICE)
2191 m->cmnd.iodir = FCP_IODIR_READ;
2192 else
2193 m->cmnd.iodir = FCP_IODIR_NONE;
2194
2195 m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
2196 fcp_dl = scsi_bufflen(cmnd);
2197 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2198
2199 /*
2200 * set up I/O message header
2201 */
2202 switch (m->cmnd.iodir) {
2203 case FCP_IODIR_READ:
2204 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
2205 bfa_stats(itnim, input_reqs);
2206 ioim->itnim->stats.rd_throughput += fcp_dl;
2207 break;
2208 case FCP_IODIR_WRITE:
2209 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
2210 bfa_stats(itnim, output_reqs);
2211 ioim->itnim->stats.wr_throughput += fcp_dl;
2212 break;
2213 case FCP_IODIR_RW:
2214 bfa_stats(itnim, input_reqs);
2215 bfa_stats(itnim, output_reqs);
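/* fall through - read-write requests are sent as the generic BFI_MC_IOIM_IO message */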
2216 default:
2217 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2218 }
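/*
 * Sequence-level error recovery, or a transfer length that is not a
 * multiple of a dword, forces the generic BFI_MC_IOIM_IO message.
 */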
2219 if (itnim->seq_rec ||
2220 (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2221 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2222
2223 /*
2224 * queue I/O message to firmware
2225 */
2226 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2227 return BFA_TRUE;
2228 }
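/*
 * SG layout built above, for reference. The first data SGE rides
 * inline in the request; the second inline SGE is a PGDLEN sentinel
 * that, for IOs larger than BFI_SGE_INLINE elements, also points at
 * the first chained SG page:
 *
 *	request:	[DATA_CPL or DATA_LAST][PGDLEN -> sgpg_pa]
 *	sgpg (middle):	[DATA]...[DATA_CPL][LINK -> next sgpg]
 *	sgpg (last):	[DATA]...[DATA_LAST][PGDLEN sentinel]
 *
 * Inline SGEs are converted to big endian (bfa_sge_to_be); chained
 * page SGEs to little endian (bfa_sge_to_le).
 */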
2229
2230 /*
2231 * Set up any additional SG pages needed. The inline SG element is set
2232 * up at queuing time.
2233 */
2234 static bfa_boolean_t
2235 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2236 {
2237 u16 nsgpgs;
2238
2239 WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2240
2241 /*
2242 * allocate SG pages needed
2243 */
2244 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2245 if (!nsgpgs)
2246 return BFA_TRUE;
2247
2248 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2249 != BFA_STATUS_OK) {
2250 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2251 return BFA_FALSE;
2252 }
2253
2254 ioim->nsgpgs = nsgpgs;
2255 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2256
2257 return BFA_TRUE;
2258 }
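/*
 * If the free pool is exhausted, bfa_sgpg_wait() parks this request on
 * sgpg_wqe; bfa_ioim_sgpg_alloced() runs once pages free up and kicks
 * the IO state machine with BFA_IOIM_SM_SGALLOCED.
 */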
2259
2260 /*
2261 * Send I/O abort request to firmware.
2262 */
2263 static bfa_boolean_t
2264 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2265 {
2266 struct bfi_ioim_abort_req_s *m;
2267 enum bfi_ioim_h2i msgop;
2268
2269 /*
2270 * check for room in queue to send request now
2271 */
2272 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2273 if (!m)
2274 return BFA_FALSE;
2275
2276 /*
2277 * build i/o request message next
2278 */
2279 if (ioim->iosp->abort_explicit)
2280 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2281 else
2282 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2283
2284 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
2285 m->io_tag = cpu_to_be16(ioim->iotag);
2286 m->abort_tag = ++ioim->abort_tag;
2287
2288 /*
2289 * queue I/O message to firmware
2290 */
2291 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2292 return BFA_TRUE;
2293 }
2294
2295 /*
2296 * Call to resume any I/O request waiting for room in the request queue.
2297 */
2298 static void
2299 bfa_ioim_qresume(void *cbarg)
2300 {
2301 struct bfa_ioim_s *ioim = cbarg;
2302
2303 bfa_stats(ioim->itnim, qresumes);
2304 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2305 }
2306
2307
2308 static void
2309 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2310 {
2311 /*
2312 * Move IO from itnim queue to fcpim global queue since itnim will be
2313 * freed.
2314 */
2315 list_del(&ioim->qe);
2316 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2317
2318 if (!ioim->iosp->tskim) {
2319 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2320 bfa_cb_dequeue(&ioim->hcb_qe);
2321 list_del(&ioim->qe);
2322 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2323 }
2324 bfa_itnim_iodone(ioim->itnim);
2325 } else
2326 bfa_wc_down(&ioim->iosp->tskim->wc);
2327 }
2328
2329 static bfa_boolean_t
2330 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2331 {
2332 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2333 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2334 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2335 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2336 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2337 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2338 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2339 return BFA_FALSE;
2340
2341 return BFA_TRUE;
2342 }
2343
2344 void
2345 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2346 {
2347 /*
2348 * If the path TOV timer expired, fail back with PATHTOV status - these
2349 * IO requests are not normally retried by the IO stack.
2350 *
2351 * Otherwise the device came back online; fail the IO with normal failed
2352 * status so that the IO stack retries these failed IO requests.
2353 */
2354 if (iotov)
2355 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2356 else {
2357 ioim->io_cbfn = __bfa_cb_ioim_failed;
2358 bfa_stats(ioim->itnim, iocom_nexus_abort);
2359 }
2360 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2361
2362 /*
2363 * Move IO to fcpim global queue since itnim will be
2364 * freed.
2365 */
2366 list_del(&ioim->qe);
2367 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2368 }
2369
2370
2371 /*
2372 * Memory allocation and initialization.
2373 */
2374 void
2375 bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2376 {
2377 struct bfa_ioim_s *ioim;
2378 struct bfa_ioim_sp_s *iosp;
2379 u16 i;
2380 u8 *snsinfo;
2381 u32 snsbufsz;
2382
2383 /*
2384 * claim memory first
2385 */
2386 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
2387 fcpim->ioim_arr = ioim;
2388 bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
2389
2390 iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
2391 fcpim->ioim_sp_arr = iosp;
2392 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
2393
2394 /*
2395 * Claim DMA memory for per IO sense data.
2396 */
2397 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
2398 fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
2399 bfa_meminfo_dma_phys(minfo) += snsbufsz;
2400
2401 fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
2402 bfa_meminfo_dma_virt(minfo) += snsbufsz;
2403 snsinfo = fcpim->snsbase.kva;
2404 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2405
2406 /*
2407 * Initialize ioim free queues
2408 */
2409 INIT_LIST_HEAD(&fcpim->ioim_free_q);
2410 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2411 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2412
2413 for (i = 0; i < fcpim->num_ioim_reqs;
2414 i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
2415 /*
2416 * initialize IOIM
2417 */
2418 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2419 ioim->iotag = i;
2420 ioim->bfa = fcpim->bfa;
2421 ioim->fcpim = fcpim;
2422 ioim->iosp = iosp;
2423 iosp->snsinfo = snsinfo;
2424 INIT_LIST_HEAD(&ioim->sgpg_q);
2425 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2426 bfa_ioim_qresume, ioim);
2427 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2428 bfa_ioim_sgpg_alloced, ioim);
2429 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2430
2431 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2432 }
2433 }
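/*
 * Resulting carve-out, for reference (n == num_ioim_reqs):
 *
 *	kva:	[struct bfa_ioim_s x n][struct bfa_ioim_sp_s x n]
 *	dma:	[BFI_IOIM_SNSLEN bytes x n]	(per-IO sense buffers)
 *
 * ioim_arr[i].iosp points at ioim_sp_arr[i], whose snsinfo in turn
 * points at the i-th BFI_IOIM_SNSLEN slice of the DMA region.
 */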
2434
2435 void
2436 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2437 {
2438 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2439 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2440 struct bfa_ioim_s *ioim;
2441 u16 iotag;
2442 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2443
2444 iotag = be16_to_cpu(rsp->io_tag);
2445
2446 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2447 WARN_ON(ioim->iotag != iotag);
2448
2449 bfa_trc(ioim->bfa, ioim->iotag);
2450 bfa_trc(ioim->bfa, rsp->io_status);
2451 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2452
2453 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2454 ioim->iosp->comp_rspmsg = *m;
2455
2456 switch (rsp->io_status) {
2457 case BFI_IOIM_STS_OK:
2458 bfa_stats(ioim->itnim, iocomp_ok);
2459 if (rsp->reuse_io_tag == 0)
2460 evt = BFA_IOIM_SM_DONE;
2461 else
2462 evt = BFA_IOIM_SM_COMP;
2463 break;
2464
2465 case BFI_IOIM_STS_TIMEDOUT:
2466 bfa_stats(ioim->itnim, iocomp_timedout);
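/* fall through - a timed out IO completes as aborted */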
2467 case BFI_IOIM_STS_ABORTED:
2468 rsp->io_status = BFI_IOIM_STS_ABORTED;
2469 bfa_stats(ioim->itnim, iocomp_aborted);
2470 if (rsp->reuse_io_tag == 0)
2471 evt = BFA_IOIM_SM_DONE;
2472 else
2473 evt = BFA_IOIM_SM_COMP;
2474 break;
2475
2476 case BFI_IOIM_STS_PROTO_ERR:
2477 bfa_stats(ioim->itnim, iocom_proto_err);
2478 WARN_ON(!rsp->reuse_io_tag);
2479 evt = BFA_IOIM_SM_COMP;
2480 break;
2481
2482 case BFI_IOIM_STS_SQER_NEEDED:
2483 bfa_stats(ioim->itnim, iocom_sqer_needed);
2484 WARN_ON(rsp->reuse_io_tag != 0);
2485 evt = BFA_IOIM_SM_SQRETRY;
2486 break;
2487
2488 case BFI_IOIM_STS_RES_FREE:
2489 bfa_stats(ioim->itnim, iocom_res_free);
2490 evt = BFA_IOIM_SM_FREE;
2491 break;
2492
2493 case BFI_IOIM_STS_HOST_ABORTED:
2494 bfa_stats(ioim->itnim, iocom_hostabrts);
2495 if (rsp->abort_tag != ioim->abort_tag) {
2496 bfa_trc(ioim->bfa, rsp->abort_tag);
2497 bfa_trc(ioim->bfa, ioim->abort_tag);
2498 return;
2499 }
2500
2501 if (rsp->reuse_io_tag)
2502 evt = BFA_IOIM_SM_ABORT_COMP;
2503 else
2504 evt = BFA_IOIM_SM_ABORT_DONE;
2505 break;
2506
2507 case BFI_IOIM_STS_UTAG:
2508 bfa_stats(ioim->itnim, iocom_utags);
2509 evt = BFA_IOIM_SM_COMP_UTAG;
2510 break;
2511
2512 default:
2513 WARN_ON(1);
2514 }
2515
2516 bfa_sm_send_event(ioim, evt);
2517 }
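/*
 * Status-to-event mapping above, at a glance. reuse_io_tag appears to
 * mean the firmware has already released the tag (SM_COMP recycles it
 * immediately); otherwise SM_DONE waits for a later RES_FREE:
 *
 *	OK, TIMEDOUT, ABORTED	-> SM_DONE or SM_COMP (per reuse_io_tag)
 *	PROTO_ERR		-> SM_COMP
 *	SQER_NEEDED		-> SM_SQRETRY
 *	RES_FREE		-> SM_FREE
 *	HOST_ABORTED		-> SM_ABORT_COMP or SM_ABORT_DONE
 *	UTAG			-> SM_COMP_UTAG
 */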
2518
2519 void
2520 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2521 {
2522 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2523 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2524 struct bfa_ioim_s *ioim;
2525 u16 iotag;
2526
2527 iotag = be16_to_cpu(rsp->io_tag);
2528
2529 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2530 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2531
2532 bfa_trc_fp(ioim->bfa, ioim->iotag);
2533 bfa_ioim_cb_profile_comp(fcpim, ioim);
2534
2535 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2536 }
2537
2538 /*
2539 * Called by itnim to clean up IO while going offline.
2540 */
2541 void
2542 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2543 {
2544 bfa_trc(ioim->bfa, ioim->iotag);
2545 bfa_stats(ioim->itnim, io_cleanups);
2546
2547 ioim->iosp->tskim = NULL;
2548 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2549 }
2550
2551 void
2552 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2553 {
2554 bfa_trc(ioim->bfa, ioim->iotag);
2555 bfa_stats(ioim->itnim, io_tmaborts);
2556
2557 ioim->iosp->tskim = tskim;
2558 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2559 }
2560
2561 /*
2562 * IOC failure handling.
2563 */
2564 void
2565 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2566 {
2567 bfa_trc(ioim->bfa, ioim->iotag);
2568 bfa_stats(ioim->itnim, io_iocdowns);
2569 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2570 }
2571
2572 /*
2573 * IO offline TOV popped. Fail the pending IO.
2574 */
2575 void
2576 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2577 {
2578 bfa_trc(ioim->bfa, ioim->iotag);
2579 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2580 }
2581
2582
2583 /*
2584 * Allocate IOIM resource for initiator mode I/O request.
2585 */
2586 struct bfa_ioim_s *
2587 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2588 struct bfa_itnim_s *itnim, u16 nsges)
2589 {
2590 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2591 struct bfa_ioim_s *ioim;
2592
2593 /*
2594 * allocate an IOIM resource
2595 */
2596 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
2597 if (!ioim) {
2598 bfa_stats(itnim, no_iotags);
2599 return NULL;
2600 }
2601
2602 ioim->dio = dio;
2603 ioim->itnim = itnim;
2604 ioim->nsges = nsges;
2605 ioim->nsgpgs = 0;
2606
2607 bfa_stats(itnim, total_ios);
2608 fcpim->ios_active++;
2609
2610 list_add_tail(&ioim->qe, &itnim->io_q);
2611 bfa_trc_fp(ioim->bfa, ioim->iotag);
2612
2613 return ioim;
2614 }
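/*
 * For illustration only: a sketch of a submission path in the driver
 * glue (dio, cmnd and the busy-return policy are assumptions, not part
 * of this file):
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, scsi_sg_count(cmnd));
 *	if (!ioim)
 *		return SCSI_MLQUEUE_HOST_BUSY;	(no free IO tags)
 *	bfa_ioim_start(ioim);
 */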
2615
2616 void
2617 bfa_ioim_free(struct bfa_ioim_s *ioim)
2618 {
2619 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2620
2621 bfa_trc_fp(ioim->bfa, ioim->iotag);
2622 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2623
2624 bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2625 (ioim->nsges > BFI_SGE_INLINE));
2626
2627 if (ioim->nsgpgs > 0)
2628 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2629
2630 bfa_stats(ioim->itnim, io_comps);
2631 fcpim->ios_active--;
2632
2633 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2634 list_del(&ioim->qe);
2635 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2636 }
2637
2638 void
2639 bfa_ioim_start(struct bfa_ioim_s *ioim)
2640 {
2641 bfa_trc_fp(ioim->bfa, ioim->iotag);
2642
2643 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2644
2645 /*
2646 * Obtain the queue over which this request has to be issued
2647 */
2648 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2649 BFA_FALSE : bfa_itnim_get_reqq(ioim);
2650
2651 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2652 }
2653
2654 /*
2655 * Driver I/O abort request.
2656 */
2657 bfa_status_t
2658 bfa_ioim_abort(struct bfa_ioim_s *ioim)
2659 {
2660
2661 bfa_trc(ioim->bfa, ioim->iotag);
2662
2663 if (!bfa_ioim_is_abortable(ioim))
2664 return BFA_STATUS_FAILED;
2665
2666 bfa_stats(ioim->itnim, io_aborts);
2667 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2668
2669 return BFA_STATUS_OK;
2670 }
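/*
 * Note for callers: BFA_STATUS_FAILED means the IO is already
 * completing, aborting, or freed, so there is nothing left to abort;
 * glue code would typically let the in-flight completion run its
 * course rather than retry the abort.
 */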
2671
2672 /*
2673 * BFA TSKIM state machine functions
2674 */
2675
2676 /*
2677 * Task management command beginning state.
2678 */
2679 static void
2680 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2681 {
2682 bfa_trc(tskim->bfa, event);
2683
2684 switch (event) {
2685 case BFA_TSKIM_SM_START:
2686 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2687 bfa_tskim_gather_ios(tskim);
2688
2689 /*
2690 * If device is offline, do not send TM on wire. Just cleanup
2691 * any pending IO requests and complete TM request.
2692 */
2693 if (!bfa_itnim_is_online(tskim->itnim)) {
2694 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2695 tskim->tsk_status = BFI_TSKIM_STS_OK;
2696 bfa_tskim_cleanup_ios(tskim);
2697 return;
2698 }
2699
2700 if (!bfa_tskim_send(tskim)) {
2701 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
2702 bfa_stats(tskim->itnim, tm_qwait);
2703 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2704 &tskim->reqq_wait);
2705 }
2706 break;
2707
2708 default:
2709 bfa_sm_fault(tskim->bfa, event);
2710 }
2711 }
2712
2713 /*
2714 * TM command is active, awaiting completion from firmware to
2715 * cleanup IO requests in TM scope.
2716 */
2717 static void
2718 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2719 {
2720 bfa_trc(tskim->bfa, event);
2721
2722 switch (event) {
2723 case BFA_TSKIM_SM_DONE:
2724 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2725 bfa_tskim_cleanup_ios(tskim);
2726 break;
2727
2728 case BFA_TSKIM_SM_CLEANUP:
2729 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2730 if (!bfa_tskim_send_abort(tskim)) {
2731 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
2732 bfa_stats(tskim->itnim, tm_qwait);
2733 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2734 &tskim->reqq_wait);
2735 }
2736 break;
2737
2738 case BFA_TSKIM_SM_HWFAIL:
2739 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2740 bfa_tskim_iocdisable_ios(tskim);
2741 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2742 break;
2743
2744 default:
2745 bfa_sm_fault(tskim->bfa, event);
2746 }
2747 }
2748
2749 /*
2750 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
2751 * completion event from firmware.
2752 */
2753 static void
2754 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2755 {
2756 bfa_trc(tskim->bfa, event);
2757
2758 switch (event) {
2759 case BFA_TSKIM_SM_DONE:
2760 /*
2761 * Ignore and wait for ABORT completion from firmware.
2762 */
2763 break;
2764
2765 case BFA_TSKIM_SM_CLEANUP_DONE:
2766 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2767 bfa_tskim_cleanup_ios(tskim);
2768 break;
2769
2770 case BFA_TSKIM_SM_HWFAIL:
2771 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2772 bfa_tskim_iocdisable_ios(tskim);
2773 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2774 break;
2775
2776 default:
2777 bfa_sm_fault(tskim->bfa, event);
2778 }
2779 }
2780
2781 static void
2782 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2783 {
2784 bfa_trc(tskim->bfa, event);
2785
2786 switch (event) {
2787 case BFA_TSKIM_SM_IOS_DONE:
2788 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2789 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
2790 break;
2791
2792 case BFA_TSKIM_SM_CLEANUP:
2793 /*
2794 * Ignore, TM command completed on wire.
2795 * Notify TM completion on IO cleanup completion.
2796 */
2797 break;
2798
2799 case BFA_TSKIM_SM_HWFAIL:
2800 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2801 bfa_tskim_iocdisable_ios(tskim);
2802 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2803 break;
2804
2805 default:
2806 bfa_sm_fault(tskim->bfa, event);
2807 }
2808 }
2809
2810 /*
2811 * Task management command is waiting for room in request CQ
2812 */
2813 static void
2814 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2815 {
2816 bfa_trc(tskim->bfa, event);
2817
2818 switch (event) {
2819 case BFA_TSKIM_SM_QRESUME:
2820 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2821 bfa_tskim_send(tskim);
2822 break;
2823
2824 case BFA_TSKIM_SM_CLEANUP:
2825 /*
2826 * No need to send TM on wire since ITN is offline.
2827 */
2828 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2829 bfa_reqq_wcancel(&tskim->reqq_wait);
2830 bfa_tskim_cleanup_ios(tskim);
2831 break;
2832
2833 case BFA_TSKIM_SM_HWFAIL:
2834 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2835 bfa_reqq_wcancel(&tskim->reqq_wait);
2836 bfa_tskim_iocdisable_ios(tskim);
2837 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2838 break;
2839
2840 default:
2841 bfa_sm_fault(tskim->bfa, event);
2842 }
2843 }
2844
2845 /*
2846 * Task management command is active, awaiting room in the request CQ
2847 * to send the cleanup request.
2848 */
2849 static void
2850 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
2851 enum bfa_tskim_event event)
2852 {
2853 bfa_trc(tskim->bfa, event);
2854
2855 switch (event) {
2856 case BFA_TSKIM_SM_DONE:
2857 bfa_reqq_wcancel(&tskim->reqq_wait);
2858 /*
2859 * Fall through !!!
2860 */
2861 case BFA_TSKIM_SM_QRESUME:
2862 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2863 bfa_tskim_send_abort(tskim);
2864 break;
2865
2866 case BFA_TSKIM_SM_HWFAIL:
2867 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2868 bfa_reqq_wcancel(&tskim->reqq_wait);
2869 bfa_tskim_iocdisable_ios(tskim);
2870 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2871 break;
2872
2873 default:
2874 bfa_sm_fault(tskim->bfa, event);
2875 }
2876 }
2877
2878 /*
2879 * BFA callback is pending
2880 */
2881 static void
2882 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2883 {
2884 bfa_trc(tskim->bfa, event);
2885
2886 switch (event) {
2887 case BFA_TSKIM_SM_HCB:
2888 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
2889 bfa_tskim_free(tskim);
2890 break;
2891
2892 case BFA_TSKIM_SM_CLEANUP:
2893 bfa_tskim_notify_comp(tskim);
2894 break;
2895
2896 case BFA_TSKIM_SM_HWFAIL:
2897 break;
2898
2899 default:
2900 bfa_sm_fault(tskim->bfa, event);
2901 }
2902 }
2903
2904 static void
2905 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2906 {
2907 struct bfa_tskim_s *tskim = cbarg;
2908
2909 if (!complete) {
2910 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2911 return;
2912 }
2913
2914 bfa_stats(tskim->itnim, tm_success);
2915 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2916 }
2917
2918 static void
2919 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2920 {
2921 struct bfa_tskim_s *tskim = cbarg;
2922
2923 if (!complete) {
2924 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2925 return;
2926 }
2927
2928 bfa_stats(tskim->itnim, tm_failures);
2929 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2930 BFI_TSKIM_STS_FAILED);
2931 }
2932
2933 static bfa_boolean_t
2934 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
2935 {
2936 switch (tskim->tm_cmnd) {
2937 case FCP_TM_TARGET_RESET:
2938 return BFA_TRUE;
2939
2940 case FCP_TM_ABORT_TASK_SET:
2941 case FCP_TM_CLEAR_TASK_SET:
2942 case FCP_TM_LUN_RESET:
2943 case FCP_TM_CLEAR_ACA:
2944 return !memcmp(&tskim->lun, &lun, sizeof(lun));
2945
2946 default:
2947 WARN_ON(1);
2948 }
2949
2950 return BFA_FALSE;
2951 }
2952
2953 /*
2954 * Gather the IO requests that fall within the task management command's scope.
2955 */
2956 static void
2957 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
2958 {
2959 struct bfa_itnim_s *itnim = tskim->itnim;
2960 struct bfa_ioim_s *ioim;
2961 struct list_head *qe, *qen;
2962 struct scsi_cmnd *cmnd;
2963 struct scsi_lun scsilun;
2964
2965 INIT_LIST_HEAD(&tskim->io_q);
2966
2967 /*
2968 * Gather any active IO requests first.
2969 */
2970 list_for_each_safe(qe, qen, &itnim->io_q) {
2971 ioim = (struct bfa_ioim_s *) qe;
2972 cmnd = (struct scsi_cmnd *) ioim->dio;
2973 int_to_scsilun(cmnd->device->lun, &scsilun);
2974 if (bfa_tskim_match_scope(tskim, scsilun)) {
2975 list_del(&ioim->qe);
2976 list_add_tail(&ioim->qe, &tskim->io_q);
2977 }
2978 }
2979
2980 /*
2981 * Fail back any pending IO requests immediately.
2982 */
2983 list_for_each_safe(qe, qen, &itnim->pending_q) {
2984 ioim = (struct bfa_ioim_s *) qe;
2985 cmnd = (struct scsi_cmnd *) ioim->dio;
2986 int_to_scsilun(cmnd->device->lun, &scsilun);
2987 if (bfa_tskim_match_scope(tskim, scsilun)) {
2988 list_del(&ioim->qe);
2989 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2990 bfa_ioim_tov(ioim);
2991 }
2992 }
2993 }
2994
2995 /*
2996 * IO cleanup completion
2997 */
2998 static void
2999 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3000 {
3001 struct bfa_tskim_s *tskim = tskim_cbarg;
3002
3003 bfa_stats(tskim->itnim, tm_io_comps);
3004 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3005 }
3006
3007 /*
3008 * Clean up all IO requests gathered in the TM command's scope.
3009 */
3010 static void
3011 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3012 {
3013 struct bfa_ioim_s *ioim;
3014 struct list_head *qe, *qen;
3015
3016 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3017
3018 list_for_each_safe(qe, qen, &tskim->io_q) {
3019 ioim = (struct bfa_ioim_s *) qe;
3020 bfa_wc_up(&tskim->wc);
3021 bfa_ioim_cleanup_tm(ioim, tskim);
3022 }
3023
3024 bfa_wc_wait(&tskim->wc);
3025 }
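/*
 * The wait-counter idiom above, sketched (behavior as used here, not
 * the helper's definition):
 *
 *	bfa_wc_init(&wc, cb, arg);	set callback, take initial ref
 *	bfa_wc_up(&wc);			+1 per IO handed to cleanup
 *	bfa_wc_wait(&wc);		drop the initial ref
 *	bfa_wc_down(&wc);		via bfa_tskim_iodone(); the last
 *					drop fires cb(arg), here
 *					bfa_tskim_cleanp_comp()
 *
 * With an empty io_q the callback fires from bfa_wc_wait() itself.
 */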
3026
3027 /*
3028 * Send task management request to firmware.
3029 */
3030 static bfa_boolean_t
3031 bfa_tskim_send(struct bfa_tskim_s *tskim)
3032 {
3033 struct bfa_itnim_s *itnim = tskim->itnim;
3034 struct bfi_tskim_req_s *m;
3035
3036 /*
3037 * check for room in queue to send request now
3038 */
3039 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3040 if (!m)
3041 return BFA_FALSE;
3042
3043 /*
3044 * build i/o request message next
3045 */
3046 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3047 bfa_lpuid(tskim->bfa));
3048
3049 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3050 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3051 m->t_secs = tskim->tsecs;
3052 m->lun = tskim->lun;
3053 m->tm_flags = tskim->tm_cmnd;
3054
3055 /*
3056 * queue I/O message to firmware
3057 */
3058 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3059 return BFA_TRUE;
3060 }
3061
3062 /*
3063 * Send abort request to cleanup an active TM to firmware.
3064 */
3065 static bfa_boolean_t
3066 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3067 {
3068 struct bfa_itnim_s *itnim = tskim->itnim;
3069 struct bfi_tskim_abortreq_s *m;
3070
3071 /*
3072 * check for room in queue to send request now
3073 */
3074 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3075 if (!m)
3076 return BFA_FALSE;
3077
3078 /*
3079 * build i/o request message next
3080 */
3081 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3082 bfa_lpuid(tskim->bfa));
3083
3084 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3085
3086 /*
3087 * queue I/O message to firmware
3088 */
3089 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3090 return BFA_TRUE;
3091 }
3092
3093 /*
3094 * Call to resume a task management command waiting for room in the request queue.
3095 */
3096 static void
3097 bfa_tskim_qresume(void *cbarg)
3098 {
3099 struct bfa_tskim_s *tskim = cbarg;
3100
3101 bfa_stats(tskim->itnim, tm_qresumes);
3102 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3103 }
3104
3105 /*
3106 * Cleanup IOs associated with a task management command on IOC failures.
3107 */
3108 static void
3109 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3110 {
3111 struct bfa_ioim_s *ioim;
3112 struct list_head *qe, *qen;
3113
3114 list_for_each_safe(qe, qen, &tskim->io_q) {
3115 ioim = (struct bfa_ioim_s *) qe;
3116 bfa_ioim_iocdisable(ioim);
3117 }
3118 }
3119
3120 /*
3121 * Notification on completions from related ioim.
3122 */
3123 void
3124 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3125 {
3126 bfa_wc_down(&tskim->wc);
3127 }
3128
3129 /*
3130 * Handle IOC h/w failure notification from itnim.
3131 */
3132 void
3133 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3134 {
3135 tskim->notify = BFA_FALSE;
3136 bfa_stats(tskim->itnim, tm_iocdowns);
3137 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3138 }
3139
3140 /*
3141 * Cleanup TM command and associated IOs as part of ITNIM offline.
3142 */
3143 void
3144 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3145 {
3146 tskim->notify = BFA_TRUE;
3147 bfa_stats(tskim->itnim, tm_cleanups);
3148 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3149 }
3150
3151 /*
3152 * Memory allocation and initialization.
3153 */
3154 void
3155 bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3156 {
3157 struct bfa_tskim_s *tskim;
3158 u16 i;
3159
3160 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3161
3162 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3163 fcpim->tskim_arr = tskim;
3164
3165 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3166 /*
3167 * initialize TSKIM
3168 */
3169 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3170 tskim->tsk_tag = i;
3171 tskim->bfa = fcpim->bfa;
3172 tskim->fcpim = fcpim;
3173 tskim->notify = BFA_FALSE;
3174 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3175 tskim);
3176 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3177
3178 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3179 }
3180
3181 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3182 }
3183
3184 void
3185 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3186 {
3187 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3188 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3189 struct bfa_tskim_s *tskim;
3190 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3191
3192 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3193 WARN_ON(tskim->tsk_tag != tsk_tag);
3194
3195 tskim->tsk_status = rsp->tsk_status;
3196
3197 /*
3198 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3199 * requests. All other statuses are for normal completions.
3200 */
3201 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3202 bfa_stats(tskim->itnim, tm_cleanup_comps);
3203 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3204 } else {
3205 bfa_stats(tskim->itnim, tm_fw_rsps);
3206 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3207 }
3208 }
3209
3210
3211 struct bfa_tskim_s *
3212 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3213 {
3214 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3215 struct bfa_tskim_s *tskim;
3216
3217 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3218
3219 if (tskim)
3220 tskim->dtsk = dtsk;
3221
3222 return tskim;
3223 }
3224
3225 void
3226 bfa_tskim_free(struct bfa_tskim_s *tskim)
3227 {
3228 WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3229 list_del(&tskim->qe);
3230 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3231 }
3232
3233 /*
3234 * Start a task management command.
3235 *
3236 * @param[in] tskim BFA task management command instance
3237 * @param[in] itnim i-t nexus for the task management command
3238 * @param[in] lun lun, if applicable
3239 * @param[in] tm_cmnd Task management command code.
3240 * @param[in] t_secs Timeout in seconds
3241 *
3242 * @return None.
3243 */
3244 void
3245 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3246 struct scsi_lun lun,
3247 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3248 {
3249 tskim->itnim = itnim;
3250 tskim->lun = lun;
3251 tskim->tm_cmnd = tm_cmnd;
3252 tskim->tsecs = tsecs;
3253 tskim->notify = BFA_FALSE;
3254 bfa_stats(itnim, tm_cmnds);
3255
3256 list_add_tail(&tskim->qe, &itnim->tsk_q);
3257 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3258 }
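/*
 * For illustration only: a hypothetical LUN reset issued from driver
 * glue code (dtsk, cmnd and the 60 second timeout are assumptions, not
 * part of this file):
 *
 *	struct scsi_lun scsilun;
 *
 *	int_to_scsilun(cmnd->device->lun, &scsilun);
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (tskim)
 *		bfa_tskim_start(tskim, itnim, scsilun,
 *				FCP_TM_LUN_RESET, 60);
 */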