/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}
static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}
static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}
static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					   int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
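
/*
 * Editor's note (illustrative, not from the original source): both lock
 * helpers above map a command tag onto one of FNIC_IO_LOCKS spinlocks by
 * masking with (FNIC_IO_LOCKS - 1), which assumes FNIC_IO_LOCKS is a
 * power of two.  A minimal sketch, assuming FNIC_IO_LOCKS == 64:
 *
 *	tag = 0x1a3  ->  hash = 0x1a3 & 0x3f = 0x23  ->  io_req_lock[0x23]
 *
 * Commands whose tags share the low bits therefore share a lock, bounding
 * lock memory while still spreading contention across outstanding IOs.
 */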
/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 DMA_TO_DEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
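
/*
 * Editor's note (worked example, not from the original source): with a
 * hypothetical ring of desc_count = 8, to_clean_index = 6 and
 * fw_ack_index[0] = 1, the ack has wrapped, so the else branch above adds
 * 8 - 6 + 1 + 1 = 4 to desc_avail (slots 6, 7, 0 and 1 are freed) and
 * to_clean_index becomes (1 + 1) % 8 = 2.
 */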
/*
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
		       unsigned long clearbits)
{
	unsigned long flags = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				     atomic64_read(
					&fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		eth_broadcast_addr(gw_mac);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags = 0;
	u8 exch_flags = 0;
	struct scsi_lun fc_lun;

	/* For each SGE, create a device desc entry */
	desc = io_req->sgl_list;
	for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
		desc->addr = cpu_to_le64(sg_dma_address(sg));
		desc->len = cpu_to_le32(sg_dma_len(sg));
		desc->_resvd = 0;
		desc++;
	}

	io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
					     io_req->sgl_list,
					     sizeof(io_req->sgl_list[0]) * sg_count,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * sg_count,
				 DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
/*
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	const int tag = scsi_cmd_to_rq(sc)->tag;
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret = 0;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;
	struct fc_rport_libfc_priv *rp;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
		return SCSI_MLQUEUE_HOST_BUSY;
	rport = starget_to_rport(scsi_target(sc->device));
	if (!rport) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "returning DID_NO_CONNECT for IO as rport is NULL\n");
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	ret = fc_remote_port_chkready(rport);
	if (ret) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport is not ready\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rp = rport->dd_data;
	if (!rp || rp->rp_state == RPORT_ST_DELETE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x removed, returning DID_NO_CONNECT\n",
			      rport->port_id);

		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	if (rp->rp_state != RPORT_ST_READY) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
			      rport->port_id, rp->rp_state);

		sc->result = DID_IMM_RETRY << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;
	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}
	/*
	 * Will acquire lock before setting to IO initialized.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   tag, sc, 0, 0, 0,
			   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
		    atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
				     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
		     (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
		     (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
		     sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		   tag, sc, io_req, sg_count, cmd_trace,
		   (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));

	/* only if we issued the IO do we hold the io lock */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}
DEF_SCSI_QCMD(fnic_queuecommand)
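
/*
 * Editor's note (assumption based on the generic DEF_SCSI_QCMD helper from
 * <scsi/scsi_host.h>, not text from this file): the macro above generates
 * the real fnic_queuecommand() entry point, which takes the Scsi_Host
 * host_lock with interrupts disabled and then calls
 * fnic_queuecommand_lck(), so the _lck routine can rely on being entered
 * with that lock held.
 */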
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic flogi reg: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}
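
/*
 * Editor's note (illustrative, not from the original source): the two
 * branches above distinguish whether the valid region of the ring has
 * wrapped.  For a hypothetical ring with to_clean_index = 2 and
 * to_use_index = 6, only request_out values 2..5 are accepted; if instead
 * to_clean_index = 6 and to_use_index = 2 (wrapped), the valid values are
 * 6, 7, 0 and 1, which is why the wrapped case only rejects indexes that
 * are simultaneously below to_clean_index and at or above to_use_index.
 */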
/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		   fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		   ox_id_tag[4], ox_id_tag[5]);
}
/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;
	unsigned long io_duration_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl sc is null - "
			     "hdr status = %s tag = 0x%x desc = 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			   fnic->lport->host->host_no, id,
			   ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			    (u64)icmnd_cmpl->_resvd0[0]),
			   ((u64)hdr_status << 16 |
			    (u64)icmnd_cmpl->scsi_status << 8 |
			    (u64)icmnd_cmpl->flags), desc,
			   (u64)icmnd_cmpl->residual, 0);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * set completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {

		/*
		 * set the FNIC_IO_DONE so that this doesn't get
		 * flagged as 'out of order' if it was not aborted
		 */
		CMD_FLAGS(sc) |= FNIC_IO_DONE;
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		spin_unlock_irqrestore(io_lock, flags);
		if (FCPIO_ABORTED == hdr_status)
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;

		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "icmnd_cmpl abts pending "
			      "hdr status = %s tag = 0x%x sc = 0x%p "
			      "scsi_status = %x residual = %d\n",
			      fnic_fcpio_status_to_str(hdr_status),
			      id, sc,
			      icmnd_cmpl->scsi_status,
			      icmnd_cmpl->residual);
		return;
	}
	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
			xfer_len -= icmnd_cmpl->residual;
			scsi_set_resid(sc, icmnd_cmpl->residual);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
			atomic64_inc(&fnic_stats->misc_stats.check_condition);

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}
	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		    (u64)icmnd_cmpl->scsi_status << 48 |
		    (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		    (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		    (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		   sc->device->host->host_no, id, sc,
		   ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		    (u64)icmnd_cmpl->_resvd0[0] << 48 |
		    jiffies_to_msecs(jiffies - start_time)),
		   desc, cmd_trace,
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	io_duration_time = jiffies_to_msecs(jiffies) -
			   jiffies_to_msecs(start_time);

	if (io_duration_time <= 10)
		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
	else if (io_duration_time <= 100)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
	else if (io_duration_time <= 500)
		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
	else if (io_duration_time <= 5000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
	else if (io_duration_time <= 10000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
	else if (io_duration_time <= 30000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
	else
		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

	if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
		atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			     fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "abort reject recd. id %d\n",
				      (int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		CMD_ABTS_STATUS(sc) = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					   sc->device->host->host_no, id,
					   sc,
					   jiffies_to_msecs(jiffies - start_time),
					   desc,
					   (((u64)hdr_status << 40) |
					    (u64)sc->cmnd[0] << 32 |
					    (u64)sc->cmnd[2] << 24 |
					    (u64)sc->cmnd[3] << 16 |
					    (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					   (((u64)CMD_FLAGS(sc) << 32) |
					    CMD_STATE(sc)));
				sc->scsi_done(sc);
				atomic64_dec(&fnic_stats->io_stats.active_ios);
				if (atomic64_read(&fnic->io_cmpl_skip))
					atomic64_dec(&fnic->io_cmpl_skip);
				else
					atomic64_inc(&fnic_stats->io_stats.io_completions);
			}
		}
	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0,
				   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "Terminate pending "
				      "dev reset cmpl recd. id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0,
				   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "dev reset cmpl recd after time out. "
				      "id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}
}
/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}
/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	u64 start_jiffies = 0;
	u64 end_jiffies = 0;
	u64 delta_jiffies = 0;
	u64 delta_ms = 0;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;

		start_jiffies = jiffies;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		end_jiffies = jiffies;

		wq_work_done += cur_work_done;
		delta_jiffies = end_jiffies - start_jiffies;
		if (delta_jiffies >
		    (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
			atomic64_set(&misc_stats->max_isr_jiffies,
				     delta_jiffies);
			delta_ms = jiffies_to_msecs(delta_jiffies);
			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
			atomic64_set(&misc_stats->corr_work_done,
				     cur_work_done);
		}
	}
	return wq_work_done;
}
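
/*
 * Editor's note (illustrative, not from the original source): the cq_index
 * arithmetic above reflects how the completion queues are laid out: raw WQ
 * CQs first, then RQ CQs, then one CQ per copy WQ.  With a hypothetical
 * raw_wq_count = 1, rq_count = 1 and wq_copy_count = 1, the single copy-WQ
 * completion queue is cq[2]; the ack handler reverses the same offset when
 * it computes cq_index - raw_wq_count - rq_count to find its wq_copy entry.
 */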
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
				 bool reserved)
{
	const int tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic *fnic = data;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	io_lock = fnic_io_lock_tag(fnic, tag);
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
	    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
		/*
		 * We will be here only when FW completes reset
		 * without sending completions for outstanding ios.
		 */
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req && io_req->dr_done)
			complete(io_req->dr_done);
		else if (io_req && io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * If there is a scsi_cmnd associated with this io_req, then
	 * free the corresponding state
	 */
	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
		      tag, sc, jiffies - start_time);

	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* Complete the command to SCSI */
	if (sc->scsi_done) {
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
				     tag, sc);

		FNIC_TRACE(fnic_cleanup_io,
			   sc->device->host->host_no, tag, sc,
			   jiffies_to_msecs(jiffies - start_time),
			   0, ((u64)sc->cmnd[0] << 32 |
			       (u64)sc->cmnd[2] << 24 |
			       (u64)sc->cmnd[3] << 16 |
			       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
	return true;
}
static void fnic_cleanup_io(struct fnic *fnic)
{
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_cleanup_io_iter, fnic);
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			   sc->device->host->host_no, id, sc,
			   jiffies_to_msecs(jiffies - start_time),
			   0, ((u64)sc->cmnd[0] << 32 |
			       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
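
/*
 * Editor's note (illustrative, not from the original source): the
 * fnic->in_flight counter bumped above is the same counter that
 * fnic_fw_reset_handler() polls before issuing a firmware reset, so an
 * abort that has been committed to the copy WQ keeps the reset path
 * waiting until the matching atomic_dec() just before this routine
 * returns.
 */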
struct fnic_rport_abort_io_iter_data {
	struct fnic *fnic;
	u32 port_id;
	int term_cnt;
};

static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
				     bool reserved)
{
	struct fnic_rport_abort_io_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int abt_tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);

	if (!io_req || io_req->port_id != iter_data->port_id) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			      sc);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to rport that went away
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if (io_req->abts_done) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic_rport_exch_reset: io_req->abts_done is set "
			     "state is %s\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)));
	}

	if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "IO not yet issued %p tag 0x%x flags "
			     "%x state %d\n",
			     sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
	}
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		atomic64_inc(&reset_stats->device_reset_terminates);
		abt_tag |= FNIC_TAG_DEV_RST;
	}
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
	BUG_ON(io_req->abts_done);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_reset_exch: Issuing abts\n");

	spin_unlock_irqrestore(io_lock, flags);

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
				    FCPIO_ITMF_ABT_TASK_TERM,
				    fc_lun.scsi_lun, io_req)) {
		/*
		 * Revert the cmd state back to old state, if
		 * it hasn't changed in between. This cmd will get
		 * aborted later by scsi_eh, or cleaned up during
		 * lun reset
		 */
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		spin_unlock_irqrestore(io_lock, flags);
	} else {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
			CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
		else
			CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
		spin_unlock_irqrestore(io_lock, flags);
		atomic64_inc(&term_stats->terminates);
		iter_data->term_cnt++;
	}
	return true;
}
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct fnic_rport_abort_io_iter_data iter_data = {
		.fnic = fnic,
		.port_id = port_id,
		.term_cnt = 0,
	};

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
			    &iter_data);
	if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	fnic_rport_exch_reset(fnic, rport->port_id);
}
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	const int tag = rq->tag;
	unsigned long abt_issued_time;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		      rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}

	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
	if (abt_issued_time <= 6000)
		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
	else
		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);

	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion wont be done till mid-layer, since abort
	 * has already started.
	 */
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
				    io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));
1876 /* Check the abort status */
1877 spin_lock_irqsave(io_lock
, flags
);
1879 io_req
= (struct fnic_io_req
*)CMD_SP(sc
);
1881 atomic64_inc(&fnic_stats
->io_stats
.ioreq_null
);
1882 spin_unlock_irqrestore(io_lock
, flags
);
1883 CMD_FLAGS(sc
) |= FNIC_IO_ABT_TERM_REQ_NULL
;
1885 goto fnic_abort_cmd_end
;
1887 io_req
->abts_done
= NULL
;
1889 /* fw did not complete abort, timed out */
1890 if (CMD_ABTS_STATUS(sc
) == FCPIO_INVALID_CODE
) {
1891 spin_unlock_irqrestore(io_lock
, flags
);
1892 if (task_req
== FCPIO_ITMF_ABT_TASK
) {
1893 atomic64_inc(&abts_stats
->abort_drv_timeouts
);
1895 atomic64_inc(&term_stats
->terminate_drv_timeouts
);
1897 CMD_FLAGS(sc
) |= FNIC_IO_ABT_TERM_TIMED_OUT
;
1899 goto fnic_abort_cmd_end
;
1902 /* IO out of order */
1904 if (!(CMD_FLAGS(sc
) & (FNIC_IO_ABORTED
| FNIC_IO_DONE
))) {
1905 spin_unlock_irqrestore(io_lock
, flags
);
1906 FNIC_SCSI_DBG(KERN_DEBUG
, fnic
->lport
->host
,
1907 "Issuing Host reset due to out of order IO\n");
1910 goto fnic_abort_cmd_end
;
1913 CMD_STATE(sc
) = FNIC_IOREQ_ABTS_COMPLETE
;
1915 start_time
= io_req
->start_time
;
1917 * firmware completed the abort, check the status,
1918 * free the io_req if successful. If abort fails,
1919 * Device reset will clean the I/O.
1921 if (CMD_ABTS_STATUS(sc
) == FCPIO_SUCCESS
)
1925 spin_unlock_irqrestore(io_lock
, flags
);
1926 goto fnic_abort_cmd_end
;
1929 spin_unlock_irqrestore(io_lock
, flags
);
1931 fnic_release_ioreq_buf(fnic
, io_req
, sc
);
1932 mempool_free(io_req
, fnic
->io_req_pool
);
1934 if (sc
->scsi_done
) {
1935 /* Call SCSI completion function to complete the IO */
1936 sc
->result
= (DID_ABORT
<< 16);
1938 atomic64_dec(&fnic_stats
->io_stats
.active_ios
);
1939 if (atomic64_read(&fnic
->io_cmpl_skip
))
1940 atomic64_dec(&fnic
->io_cmpl_skip
);
1942 atomic64_inc(&fnic_stats
->io_stats
.io_completions
);
1946 FNIC_TRACE(fnic_abort_cmd
, sc
->device
->host
->host_no
, tag
, sc
,
1947 jiffies_to_msecs(jiffies
- start_time
),
1948 0, ((u64
)sc
->cmnd
[0] << 32 |
1949 (u64
)sc
->cmnd
[2] << 24 | (u64
)sc
->cmnd
[3] << 16 |
1950 (u64
)sc
->cmnd
[4] << 8 | sc
->cmnd
[5]),
1951 (((u64
)CMD_FLAGS(sc
) << 32) | CMD_STATE(sc
)));
1953 FNIC_SCSI_DBG(KERN_DEBUG
, fnic
->lport
->host
,
1954 "Returning from abort cmd type %x %s\n", task_req
,
1956 "SUCCESS" : "FAILED");
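/*
 * Illustrative sketch (not part of the driver flow above): the abort wait
 * is bounded by 2 * RA_TOV + ED_TOV, where both timeout values come from
 * the firmware-provided config in milliseconds. Assuming, for example,
 * ra_tov = 10000 and ed_tov = 2000 (example values only; the real ones are
 * read from fnic->config at probe time):
 *
 *	timeout_ms = 2 * 10000 + 2000;            == 22000 ms
 *	wait_for_completion_timeout(&tm_done, msecs_to_jiffies(timeout_ms));
 *
 * so the eh thread waits at most about 22 seconds before declaring a
 * driver timeout and letting device reset clean up the I/O.
 */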
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}
struct fnic_pending_aborts_iter_data {
	struct fnic *fnic;
	struct scsi_cmnd *lr_sc;
	struct scsi_device *lun_dev;
	int ret;
};
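/*
 * Illustrative sketch (not a new code path): both iterators below receive
 * this structure through the opaque data pointer of scsi_host_busy_iter().
 * A caller typically fills it on the stack and reads back ->ret once the
 * walk finishes, e.g.:
 *
 *	struct fnic_pending_aborts_iter_data iter_data = {
 *		.fnic = fnic,
 *		.lun_dev = lr_sc->device,
 *		.ret = SUCCESS,
 *	};
 *	scsi_host_busy_iter(fnic->lport->host, fnic_pending_aborts_iter,
 *			    &iter_data);
 *
 * This mirrors fnic_clean_pending_aborts() further down; it is shown here
 * only to make the iterator contract explicit.
 */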
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
				     void *data, bool reserved)
{
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	struct scsi_device *lun_dev = iter_data->lun_dev;
	int abt_tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_lun fc_lun;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	if (sc == iter_data->lr_sc || sc->device != lun_dev)
		return true;

	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to the LUN that we are resetting
	 */
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(CMD_STATE(sc)));

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "%s dev rst not pending sc 0x%p\n", __func__,
			      sc);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	if (io_req->abts_done)
		shost_printk(KERN_ERR, fnic->lport->host,
			     "%s: io_req->abts_done is set state is %s\n",
			     __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
	old_ioreq_state = CMD_STATE(sc);
	/*
	 * Any pending IO issued prior to the reset is expected to be in the
	 * abts-pending state; if not, set FNIC_IOREQ_ABTS_PENDING to mark
	 * the IO as abort-pending. When the IO completes, it will be handed
	 * over and handled in this function.
	 */
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

	BUG_ON(io_req->abts_done);

	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		abt_tag |= FNIC_TAG_DEV_RST;
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "%s: dev rst sc 0x%p\n", __func__, sc);
	}

	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
	io_req->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
				    FCPIO_ITMF_ABT_TASK_TERM,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		spin_unlock_irqrestore(io_lock, flags);
		iter_data->ret = FAILED;
		return false;
	} else {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
			CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
		spin_unlock_irqrestore(io_lock, flags);
	}
	CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
				    (fnic->config.ed_tov));

	/* Recheck cmd state to check if it is now aborted */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		return true;
	}

	io_req->abts_done = NULL;

	/* if abort is still pending with fw, fail */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		iter_data->ret = FAILED;
		return false;
	}
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc) {
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/*
	 * Any IO returned during reset needs to call scsi_done to hand
	 * the scsi_cmnd back to the upper layer.
	 */
	if (sc->scsi_done) {
		/* Set result to let upper SCSI layer retry */
		sc->result = DID_RESET << 16;
		sc->scsi_done(sc);
	}
	return true;
}
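/*
 * Note on the iterator contract (an assumption based on how the callbacks
 * above and below use their return value): scsi_host_busy_iter() keeps
 * walking busy commands while the callback returns true and stops as soon
 * as it returns false. That is why the failure paths above record
 * iter_data->ret = FAILED and return false, while every skip-this-command
 * path returns true.
 */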
/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun whose abort is not yet completed by
 * the firmware, issue a local abort and wait for it to complete. Return 0
 * if all commands were successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
				     bool new_sc)
{
	int ret = SUCCESS;
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = lr_sc->device,
		.ret = SUCCESS,
	};

	if (new_sc)
		iter_data.lr_sc = lr_sc;

	scsi_host_busy_iter(fnic->lport->host,
			    fnic_pending_aborts_iter, &iter_data);
	if (iter_data.ret == FAILED) {
		ret = iter_data.ret;
		goto clean_pending_aborts_end;
	}
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check, if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = FAILED;

clean_pending_aborts_end:
	return ret;
}
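/*
 * Illustrative sketch (usage assumption): the LUN reset path below calls
 * this helper after the reset itself has completed, and treats anything
 * other than SUCCESS as grounds to fail the whole device reset, e.g.:
 *
 *	if (fnic_clean_pending_aborts(fnic, sc, new_sc))
 *		goto fnic_device_reset_clean;    (reset reported as FAILED)
 *
 * The 2 * ed_tov settle delay gives the firmware a window to retire any
 * terminates that were just queued before the tag map is re-checked.
 */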
/*
 * fnic_scsi_host_start_tag
 * Allocates a tag from the host's tag list.
 */
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct request_queue *q = rq->q;
	struct request *dummy;

	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return SCSI_NO_TAG;

	sc->tag = rq->tag = dummy->tag;
	sc->host_scribble = (unsigned char *)dummy;

	return dummy->tag;
}
/*
 * fnic_scsi_host_end_tag
 * Frees the tag allocated by fnic_scsi_host_start_tag.
 */
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request *dummy = (struct request *)sc->host_scribble;

	blk_mq_free_request(dummy);
}
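/*
 * Illustrative sketch (not an additional code path): the two helpers above
 * are meant to be used as a pair around a reset request that arrives
 * without a block-layer tag (for example, one generated through an ioctl),
 * as the device reset handler below does:
 *
 *	tag = fnic_scsi_host_start_tag(fnic, sc);
 *	if (tag == SCSI_NO_TAG)
 *		return FAILED;               (no dummy request available)
 *	tag_gen_flag = 1;
 *	... issue and wait for the reset ...
 *	if (tag_gen_flag)
 *		fnic_scsi_host_end_tag(fnic, sc);
 *
 * The dummy request is parked in sc->host_scribble so the end helper can
 * find and free it.
 */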
/*
 * The SCSI eh thread issues a LUN reset when one or more commands on a LUN
 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI
 * command on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = rq->tag;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
	bool new_sc = 0;

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}

	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
	/* Allocate tag if not present */

	if (unlikely(tag < 0)) {
		/*
		 * Really should fix the midlayer to pass in a proper
		 * request for ioctls...
		 */
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
		new_sc = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is an io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on device reset request.
		 * If queueing of the terminate fails, retry it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
					      "Abort and terminate issued on Device reset "
					      "tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds.
	 */
	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}
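/*
 * Illustrative note (assumption drawn from the paths above): device-reset
 * ITMFs share the tag space with regular commands, so the driver marks
 * them by OR-ing FNIC_TAG_DEV_RST into the tag before handing it to the
 * firmware, e.g.:
 *
 *	fnic_queue_abort_io_req(fnic, tag | FNIC_TAG_DEV_RST,
 *				FCPIO_ITMF_ABT_TASK_TERM,
 *				fc_lun.scsi_lun, io_req);
 *
 * Completion handling elsewhere in the driver can then tell a terminated
 * LUN-reset request apart from a terminated regular I/O that carries the
 * same base tag.
 */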
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = fc_lport_reset(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}
/*
 * SCSI error handling calls the driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if the link is up, then fabric login begins.
 *
 * Host reset is the highest level of error recovery. If this fails, the
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!fnic->internal_reset_inprogress) {
		fnic->internal_reset_inprogress = true;
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "host reset in progress skipping another host reset\n");
		return SUCCESS;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete.
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up.
	 */
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies +
				FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->internal_reset_inprogress = false;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return ret;
}
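/*
 * Illustrative sketch (assumption: the actual template lives in the
 * driver's host-registration code, not in this file): the midlayer reaches
 * the two handlers above through the SCSI host template, roughly as:
 *
 *	static struct scsi_host_template fnic_host_template_example = {
 *		...
 *		.eh_device_reset_handler = fnic_device_reset,
 *		.eh_host_reset_handler   = fnic_host_reset,
 *		...
 *	};
 *
 * Error handling escalates from command abort to LUN reset to host reset;
 * only when fnic_host_reset() also fails does the midlayer offline the
 * host.
 */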
/*
 * This function is called from libFC when the host is being removed.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
	    fnic->link_events) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

}
/*
 * This function is called from libFC to clean up driver IO state on link down.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

}
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0:
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);

}
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
				   bool reserved)
{
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int cmd_state;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;

	/*
	 * ignore this lun reset cmd or cmds that do not belong to
	 * this lun
	 */
	if (iter_data->lr_sc && sc == iter_data->lr_sc)
		return true;
	if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
		return true;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to the LUN that we are resetting
	 */
	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(CMD_STATE(sc)));
	cmd_state = CMD_STATE(sc);
	spin_unlock_irqrestore(io_lock, flags);
	if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
		iter_data->ret = 1;

	return iter_data->ret ? false : true;
}
/*
 * fnic_is_abts_pending() is a helper function that
 * walks through the tag map to check if any IOs are pending. If there is
 * one, it returns 1 (true), otherwise 0 (false).
 * If @lr_sc is non-NULL, it checks IOs specific to that particular LUN,
 * otherwise it checks all IOs.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.ret = 0,
	};

	if (lr_sc) {
		iter_data.lun_dev = lr_sc->device;
		iter_data.lr_sc = lr_sc;
	}

	/* walk again to check, if IOs are still pending in fw */
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_abts_pending_iter, &iter_data);

	return iter_data.ret;
}
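/*
 * Illustrative sketch (usage assumption): callers treat the return value
 * as a simple boolean, as the LUN-reset cleanup above does:
 *
 *	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
 *	if (fnic_is_abts_pending(fnic, lr_sc))
 *		ret = FAILED;        (some IOs are still pending in firmware)
 *
 * Passing a NULL @lr_sc widens the check from one LUN to every busy
 * command on the host.
 */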