/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL");
static int fc_fcp_debug;

#define FC_DEBUG_FCP(fmt...)			\
	do {					\
		if (fc_fcp_debug)		\
			FC_DBG(fmt);		\
	} while (0)
static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data received */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)
/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)
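/*
 * Illustrative sketch only (not additional driver code): how CMD_SP() is
 * meant to be used.  The field is only valid under the host lock, and a NULL
 * value means the command has already been handed back to scsi-ml, as the
 * fc_eh_abort() path further below does:
 *
 *	spin_lock_irqsave(lp->host->host_lock, flags);
 *	fsp = CMD_SP(sc_cmd);
 *	if (!fsp) {
 *		spin_unlock_irqrestore(lp->host->host_lock, flags);
 *		return SUCCESS;		// nothing left to abort
 *	}
 *	fc_fcp_pkt_hold(fsp);
 *	spin_unlock_irqrestore(lp->host->host_lock, flags);
 */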
struct fc_fcp_internal {
	mempool_t	*scsi_pkt_pool;
	struct list_head scsi_pkt_queue;
	u8		throttled;
};

#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
/*
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_TIME_OUT		10
/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32
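/*
 * For orientation only: these values are in jiffies.  With a kernel built
 * with HZ=1000, FC_SCSI_REC_TOV = 2 * HZ = 2000 jiffies, i.e. two seconds,
 * and jiffies_to_msecs(FC_SCSI_REC_TOV) == 2000, which is what the ELS/REC
 * sends below pass as their millisecond timeout.  The same arithmetic makes
 * FC_SCSI_ER_TIMEOUT ten seconds and FC_HOST_RESET_TIMEOUT thirty seconds
 * regardless of the configured HZ.
 */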
/**
 * fc_fcp_pkt_alloc() - allocation routine for scsi_pkt packet
 * @lp:		fc lport struct
 * @gfp:	gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or null on allocation failure.
 * Context	: call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lp;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}
/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp:	fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context	: call from process and interrupt context.
 *		  no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}
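/*
 * Reference-count lifecycle, sketched from the call sites in this file
 * (illustrative only, not additional driver code): fc_fcp_pkt_alloc()
 * returns the packet with ref_cnt == 1; every context that keeps the packet
 * beyond its own scope takes an extra reference and drops it when done:
 *
 *	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);	// ref_cnt = 1
 *	fc_fcp_pkt_hold(fsp);			// e.g. for fc_fcp_pkt_destroy
 *	...
 *	fc_fcp_pkt_release(fsp);		// drops the extra hold
 *	fc_fcp_pkt_release(fsp);		// last put frees via mempool
 */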
/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq:	exchange sequence
 * @fsp:	fcp packet struct
 *
 * Release the hold taken to keep the scsi_pkt around until the EM layer
 * exchange resource is freed.
 * Context	: called from the EM layer.
 *		  no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}
/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp:	fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}
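/*
 * Typical caller pattern (illustrative only), as used by the receive and
 * timeout paths later in this file: take the lock plus a reference, bail out
 * if the command already completed, and let fc_fcp_unlock_pkt() drop both:
 *
 *	if (fc_fcp_lock_pkt(fsp))
 *		return;			// already completed to scsi-ml
 *	...				// inspect/modify fsp safely
 *	fc_fcp_unlock_pkt(fsp);		// unlock and drop the reference
 */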
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}
/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}
/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lp = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	size_t remaining;
	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);
	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_DEBUG_FCP("data received past end. len %zx offset %zx "
			     "data_len %x\n", len, offset, fsp->data_len);
		fc_fcp_retry_cmd(fsp);
		return;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));

	sg = scsi_sglist(sc);
	remaining = len;

	while (remaining > 0 && sg) {
		size_t off;
		void *page_addr;
		size_t sg_bytes;

		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		sg_bytes = min(remaining, sg->length - offset);

		/*
		 * The scatterlist item may be bigger than PAGE_SIZE,
		 * but we are limited to mapping PAGE_SIZE at a time.
		 */
		off = offset + sg->offset;
		sg_bytes = min(sg_bytes, (size_t)
			       (PAGE_SIZE - (off & ~PAGE_MASK)));
		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
					KM_SOFTIRQ0);
		if (!page_addr)
			break;		/* XXX panic? */

		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
			crc = crc32(crc, buf, sg_bytes);
		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
		       sg_bytes);

		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		buf += sg_bytes;
		offset += sg_bytes;
		remaining -= sg_bytes;
		copy_len += sg_bytes;
	}
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		buf = fc_frame_payload_get(fp, 0);
		if (len % 4) {
			crc = crc32(crc, buf + len, 4 - (len % 4));
			len += 4 - (len % 4);
		}

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = lp->dev_stats[smp_processor_id()];
			stats->ErrorFrames++;
			if (stats->InvalidCRCCount++ < 5)
				FC_DBG("CRC error on data frame\n");
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				fc_fcp_retry_cmd(fsp);
			return;
		}
	}
	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
}
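/*
 * Worked example for the PAGE_SIZE clamp used in the copy loop above and in
 * fc_fcp_send_data() below (numbers are illustrative): with PAGE_SIZE = 4096
 * and off = sg->offset + offset = 5000, the copy starts 904 bytes into the
 * second page of the scatterlist element (off & ~PAGE_MASK == 904 and
 * off >> PAGE_SHIFT == 1), so at most 4096 - 904 = 3192 bytes can be copied
 * under a single kmap_atomic() mapping; the remainder is handled on the next
 * loop iteration.
 */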
/**
 * fc_fcp_send_data() - Send SCSI data to target.
 * @fsp: ptr to fc_fcp_pkt
 * @seq: ptr to this sequence
 * @offset: starting offset for this data request
 * @seq_blen: the burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down seq_blen
 * bytes of data in a single frame, otherwise send multiple FC
 * frames of the maximum FC frame payload supported by the target port.
 *
 * Returns : 0 for success.
 */
401 static int fc_fcp_send_data(struct fc_fcp_pkt
*fsp
, struct fc_seq
*seq
,
402 size_t offset
, size_t seq_blen
)
405 struct scsi_cmnd
*sc
;
406 struct scatterlist
*sg
;
407 struct fc_frame
*fp
= NULL
;
408 struct fc_lport
*lp
= fsp
->lp
;
413 size_t frame_offset
, fh_parm_offset
;
417 int using_sg
= lp
->sg_supp
;
420 WARN_ON(seq_blen
<= 0);
421 if (unlikely(offset
+ seq_blen
> fsp
->data_len
)) {
422 /* this should never happen */
423 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
425 fc_fcp_send_abort(fsp
);
427 } else if (offset
!= fsp
->xfer_len
) {
428 /* Out of Order Data Request - no problem, but unexpected. */
429 FC_DEBUG_FCP("xfer-ready non-contiguous. "
430 "seq_blen %zx offset %zx\n", seq_blen
, offset
);
434 * if LLD is capable of seq_offload then set transport
435 * burst length (t_blen) to seq_blen, otherwise set t_blen
436 * to max FC frame payload previously set in fsp->max_payload.
438 t_blen
= fsp
->max_payload
;
439 if (lp
->seq_offload
) {
440 t_blen
= min(seq_blen
, (size_t)lp
->lso_max
);
441 FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
442 fsp
, seq_blen
, lp
->lso_max
, t_blen
);
445 WARN_ON(t_blen
< FC_MIN_MAX_PAYLOAD
);
447 t_blen
&= ~(512 - 1); /* round down to block size */
448 WARN_ON(t_blen
< FC_MIN_MAX_PAYLOAD
); /* won't go below 256 */
451 remaining
= seq_blen
;
452 fh_parm_offset
= frame_offset
= offset
;
454 seq
= lp
->tt
.seq_start_next(seq
);
455 f_ctl
= FC_FC_REL_OFF
;
459 * If a get_page()/put_page() will fail, don't use sg lists
460 * in the fc_frame structure.
462 * The put_page() may be long after the I/O has completed
463 * in the case of FCoE, since the network driver does it
464 * via free_skb(). See the test in free_pages_check().
466 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
469 for (sg
= scsi_sglist(sc
); sg
; sg
= sg_next(sg
)) {
470 if (page_count(sg_page(sg
)) == 0 ||
471 (sg_page(sg
)->flags
& (1 << PG_lru
|
485 sg
= scsi_sglist(sc
);
487 while (remaining
> 0 && sg
) {
488 if (offset
>= sg
->length
) {
489 offset
-= sg
->length
;
494 tlen
= min(t_blen
, remaining
);
497 * TODO. Temporary workaround. fc_seq_send() can't
498 * handle odd lengths in non-linear skbs.
499 * This will be the final fragment only.
504 fp
= _fc_frame_alloc(lp
, 0);
508 fp
= fc_frame_alloc(lp
, tlen
);
512 data
= (void *)(fr_hdr(fp
)) +
513 sizeof(struct fc_frame_header
);
515 fh_parm_offset
= frame_offset
;
516 fr_max_payload(fp
) = fsp
->max_payload
;
518 sg_bytes
= min(tlen
, sg
->length
- offset
);
520 WARN_ON(skb_shinfo(fp_skb(fp
))->nr_frags
>
522 get_page(sg_page(sg
));
523 skb_fill_page_desc(fp_skb(fp
),
524 skb_shinfo(fp_skb(fp
))->nr_frags
,
525 sg_page(sg
), sg
->offset
+ offset
,
527 fp_skb(fp
)->data_len
+= sg_bytes
;
528 fr_len(fp
) += sg_bytes
;
529 fp_skb(fp
)->truesize
+= PAGE_SIZE
;
531 size_t off
= offset
+ sg
->offset
;
534 * The scatterlist item may be bigger than PAGE_SIZE,
535 * but we must not cross pages inside the kmap.
537 sg_bytes
= min(sg_bytes
, (size_t) (PAGE_SIZE
-
538 (off
& ~PAGE_MASK
)));
539 page_addr
= kmap_atomic(sg_page(sg
) +
542 memcpy(data
, (char *)page_addr
+ (off
& ~PAGE_MASK
),
544 kunmap_atomic(page_addr
, KM_SOFTIRQ0
);
548 frame_offset
+= sg_bytes
;
550 remaining
-= sg_bytes
;
556 * Send sequence with transfer sequence initiative in case
557 * this is last FCP frame of the sequence.
560 f_ctl
|= FC_FC_SEQ_INIT
| FC_FC_END_SEQ
;
562 ep
= fc_seq_exch(seq
);
563 fc_fill_fc_hdr(fp
, FC_RCTL_DD_SOL_DATA
, ep
->did
, ep
->sid
,
564 FC_TYPE_FCP
, f_ctl
, fh_parm_offset
);
	/*
	 * send the fragment for this sequence.
	 */
569 error
= lp
->tt
.seq_send(lp
, seq
, fp
);
571 WARN_ON(1); /* send error should be rare */
572 fc_fcp_retry_cmd(fsp
);
577 fsp
->xfer_len
+= seq_blen
; /* premature count? */
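/*
 * Burst-length sizing in fc_fcp_send_data(), restated with example numbers
 * (illustrative only): with sequence offload enabled and lp->lso_max = 65536,
 * a target-requested burst of seq_blen = 0x18000 gives
 * t_blen = min(seq_blen, lso_max) = 65536, so the burst goes down in large
 * chunks; without offload t_blen falls back to fsp->max_payload (e.g. 2048
 * for a typical FC frame).  Either way the value is rounded down to a
 * multiple of 512 by "t_blen &= ~(512 - 1)" and is never allowed to drop
 * below FC_MIN_MAX_PAYLOAD.
 */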
581 static void fc_fcp_abts_resp(struct fc_fcp_pkt
*fsp
, struct fc_frame
*fp
)
584 struct fc_ba_rjt
*brp
;
585 struct fc_frame_header
*fh
;
587 fh
= fc_frame_header_get(fp
);
588 switch (fh
->fh_r_ctl
) {
592 brp
= fc_frame_payload_get(fp
, sizeof(*brp
));
593 if (brp
&& brp
->br_reason
== FC_BA_RJT_LOG_ERR
)
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore clear the ba_done flag.
		 */
606 fsp
->state
|= FC_SRB_ABORTED
;
607 fsp
->state
&= ~FC_SRB_ABORT_PENDING
;
609 if (fsp
->wait_for_comp
)
610 complete(&fsp
->tm_done
);
612 fc_fcp_complete_locked(fsp
);
617 * fc_fcp_reduce_can_queue() - drop can_queue
618 * @lp: lport to drop queueing for
620 * If we are getting memory allocation failures, then we may
621 * be trying to execute too many commands. We let the running
622 * commands complete or timeout, then try again with a reduced
623 * can_queue. Eventually we will hit the point where we run
624 * on all reserved structs.
626 static void fc_fcp_reduce_can_queue(struct fc_lport
*lp
)
628 struct fc_fcp_internal
*si
= fc_get_scsi_internal(lp
);
632 spin_lock_irqsave(lp
->host
->host_lock
, flags
);
637 can_queue
= lp
->host
->can_queue
;
641 lp
->host
->can_queue
= can_queue
;
642 shost_printk(KERN_ERR
, lp
->host
, "Could not allocate frame.\n"
643 "Reducing can_queue to %d.\n", can_queue
);
645 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
/**
 * fc_fcp_recv() - Receive FCP frames
 * @seq: The sequence the frame is on
 * @fp: The FC frame
 * @arg: The related FCP packet
 *
 * Context  : called from Soft IRQ context
 *	      must not be called while holding the list lock
 */
658 static void fc_fcp_recv(struct fc_seq
*seq
, struct fc_frame
*fp
, void *arg
)
660 struct fc_fcp_pkt
*fsp
= (struct fc_fcp_pkt
*)arg
;
662 struct fc_frame_header
*fh
;
663 struct fcp_txrdy
*dd
;
670 fh
= fc_frame_header_get(fp
);
671 r_ctl
= fh
->fh_r_ctl
;
674 if (!(lp
->state
& LPORT_ST_READY
))
676 if (fc_fcp_lock_pkt(fsp
))
678 fsp
->last_pkt_time
= jiffies
;
680 if (fh
->fh_type
== FC_TYPE_BLS
) {
681 fc_fcp_abts_resp(fsp
, fp
);
685 if (fsp
->state
& (FC_SRB_ABORTED
| FC_SRB_ABORT_PENDING
))
688 if (r_ctl
== FC_RCTL_DD_DATA_DESC
) {
690 * received XFER RDY from the target
691 * need to send data to the target
693 WARN_ON(fr_flags(fp
) & FCPHF_CRC_UNCHECKED
);
694 dd
= fc_frame_payload_get(fp
, sizeof(*dd
));
697 rc
= fc_fcp_send_data(fsp
, seq
,
698 (size_t) ntohl(dd
->ft_data_ro
),
699 (size_t) ntohl(dd
->ft_burst_len
));
701 seq
->rec_data
= fsp
->xfer_len
;
702 else if (rc
== -ENOMEM
)
703 fsp
->state
|= FC_SRB_NOMEM
;
704 } else if (r_ctl
== FC_RCTL_DD_SOL_DATA
) {
706 * received a DATA frame
707 * next we will copy the data to the system buffer
709 WARN_ON(fr_len(fp
) < sizeof(*fh
)); /* len may be 0 */
710 fc_fcp_recv_data(fsp
, fp
);
711 seq
->rec_data
= fsp
->xfer_contig_end
;
712 } else if (r_ctl
== FC_RCTL_DD_CMD_STATUS
) {
713 WARN_ON(fr_flags(fp
) & FCPHF_CRC_UNCHECKED
);
715 fc_fcp_resp(fsp
, fp
);
717 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl
);
720 fc_fcp_unlock_pkt(fsp
);
725 fc_fcp_error(fsp
, fp
);
726 else if (rc
== -ENOMEM
)
727 fc_fcp_reduce_can_queue(lp
);
730 static void fc_fcp_resp(struct fc_fcp_pkt
*fsp
, struct fc_frame
*fp
)
732 struct fc_frame_header
*fh
;
733 struct fcp_resp
*fc_rp
;
734 struct fcp_resp_ext
*rp_ex
;
735 struct fcp_resp_rsp_info
*fc_rp_info
;
743 fh
= (struct fc_frame_header
*)fr_hdr(fp
);
744 if (unlikely(plen
< sizeof(*fh
) + sizeof(*fc_rp
)))
747 fc_rp
= (struct fcp_resp
*)(fh
+ 1);
748 fsp
->cdb_status
= fc_rp
->fr_status
;
749 flags
= fc_rp
->fr_flags
;
750 fsp
->scsi_comp_flags
= flags
;
751 expected_len
= fsp
->data_len
;
753 if (unlikely((flags
& ~FCP_CONF_REQ
) || fc_rp
->fr_status
)) {
754 rp_ex
= (void *)(fc_rp
+ 1);
755 if (flags
& (FCP_RSP_LEN_VAL
| FCP_SNS_LEN_VAL
)) {
756 if (plen
< sizeof(*fc_rp
) + sizeof(*rp_ex
))
758 fc_rp_info
= (struct fcp_resp_rsp_info
*)(rp_ex
+ 1);
759 if (flags
& FCP_RSP_LEN_VAL
) {
760 respl
= ntohl(rp_ex
->fr_rsp_len
);
761 if (respl
!= sizeof(*fc_rp_info
))
763 if (fsp
->wait_for_comp
) {
764 /* Abuse cdb_status for rsp code */
765 fsp
->cdb_status
= fc_rp_info
->rsp_code
;
766 complete(&fsp
->tm_done
);
				/*
				 * tmfs will not have any scsi cmd so
				 * exit here
				 */
775 if (flags
& FCP_SNS_LEN_VAL
) {
776 snsl
= ntohl(rp_ex
->fr_sns_len
);
777 if (snsl
> SCSI_SENSE_BUFFERSIZE
)
778 snsl
= SCSI_SENSE_BUFFERSIZE
;
779 memcpy(fsp
->cmd
->sense_buffer
,
780 (char *)fc_rp_info
+ respl
, snsl
);
783 if (flags
& (FCP_RESID_UNDER
| FCP_RESID_OVER
)) {
784 if (plen
< sizeof(*fc_rp
) + sizeof(rp_ex
->fr_resid
))
786 if (flags
& FCP_RESID_UNDER
) {
787 fsp
->scsi_resid
= ntohl(rp_ex
->fr_resid
);
				/*
				 * The cmnd->underflow is the minimum number of
				 * bytes that must be transferred for this
				 * command.  Provided a sense condition is not
				 * present, make sure the actual amount
				 * transferred is at least the underflow value
				 * or fail.
				 */
796 if (!(flags
& FCP_SNS_LEN_VAL
) &&
797 (fc_rp
->fr_status
== 0) &&
798 (scsi_bufflen(fsp
->cmd
) -
799 fsp
->scsi_resid
) < fsp
->cmd
->underflow
)
801 expected_len
-= fsp
->scsi_resid
;
803 fsp
->status_code
= FC_ERROR
;
807 fsp
->state
|= FC_SRB_RCV_STATUS
;
810 * Check for missing or extra data frames.
812 if (unlikely(fsp
->xfer_len
!= expected_len
)) {
813 if (fsp
->xfer_len
< expected_len
) {
			/*
			 * Some data may be queued locally.
			 * Wait at least one jiffy to see if it is delivered.
			 * If this expires without data, we may do SRR.
			 */
819 fc_fcp_timer_set(fsp
, 2);
822 fsp
->status_code
= FC_DATA_OVRRUN
;
823 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
826 fsp
->xfer_len
, expected_len
, fsp
->data_len
);
828 fc_fcp_complete_locked(fsp
);
832 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
833 flags
, fr_len(fp
), respl
, snsl
);
835 fsp
->status_code
= FC_ERROR
;
836 fc_fcp_complete_locked(fsp
);
840 * fc_fcp_complete_locked() - complete processing of a fcp packet
843 * This function may sleep if a timer is pending. The packet lock must be
844 * held, and the host lock must not be held.
846 static void fc_fcp_complete_locked(struct fc_fcp_pkt
*fsp
)
848 struct fc_lport
*lp
= fsp
->lp
;
853 if (fsp
->state
& FC_SRB_ABORT_PENDING
)
856 if (fsp
->state
& FC_SRB_ABORTED
) {
857 if (!fsp
->status_code
)
858 fsp
->status_code
= FC_CMD_ABORTED
;
861 * Test for transport underrun, independent of response
864 if (fsp
->xfer_len
< fsp
->data_len
&& !fsp
->io_status
&&
865 (!(fsp
->scsi_comp_flags
& FCP_RESID_UNDER
) ||
866 fsp
->xfer_len
< fsp
->data_len
- fsp
->scsi_resid
)) {
867 fsp
->status_code
= FC_DATA_UNDRUN
;
875 if (unlikely(fsp
->scsi_comp_flags
& FCP_CONF_REQ
)) {
876 struct fc_frame
*conf_frame
;
879 csp
= lp
->tt
.seq_start_next(seq
);
880 conf_frame
= fc_frame_alloc(fsp
->lp
, 0);
882 f_ctl
= FC_FC_SEQ_INIT
;
883 f_ctl
|= FC_FC_LAST_SEQ
| FC_FC_END_SEQ
;
884 ep
= fc_seq_exch(seq
);
885 fc_fill_fc_hdr(conf_frame
, FC_RCTL_DD_SOL_CTL
,
887 FC_TYPE_FCP
, f_ctl
, 0);
888 lp
->tt
.seq_send(lp
, csp
, conf_frame
);
891 lp
->tt
.exch_done(seq
);
896 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt
*fsp
, int error
)
898 struct fc_lport
*lp
= fsp
->lp
;
901 lp
->tt
.exch_done(fsp
->seq_ptr
);
904 fsp
->status_code
= error
;
/**
 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
 * @lp:		libfc local port
 * @id:		target id
 * @lun:	lun
 * @error:	fsp status code
 *
 * If lun or id is -1, they are ignored.
 */
916 static void fc_fcp_cleanup_each_cmd(struct fc_lport
*lp
, unsigned int id
,
917 unsigned int lun
, int error
)
919 struct fc_fcp_internal
*si
= fc_get_scsi_internal(lp
);
920 struct fc_fcp_pkt
*fsp
;
921 struct scsi_cmnd
*sc_cmd
;
924 spin_lock_irqsave(lp
->host
->host_lock
, flags
);
926 list_for_each_entry(fsp
, &si
->scsi_pkt_queue
, list
) {
928 if (id
!= -1 && scmd_id(sc_cmd
) != id
)
931 if (lun
!= -1 && sc_cmd
->device
->lun
!= lun
)
934 fc_fcp_pkt_hold(fsp
);
935 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
937 if (!fc_fcp_lock_pkt(fsp
)) {
938 fc_fcp_cleanup_cmd(fsp
, error
);
940 fc_fcp_unlock_pkt(fsp
);
943 fc_fcp_pkt_release(fsp
);
944 spin_lock_irqsave(lp
->host
->host_lock
, flags
);
946 * while we dropped the lock multiple pkts could
947 * have been released, so we have to start over.
951 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
954 static void fc_fcp_abort_io(struct fc_lport
*lp
)
956 fc_fcp_cleanup_each_cmd(lp
, -1, -1, FC_HRD_ERROR
);
/**
 * fc_fcp_pkt_send() - send a fcp packet to the lower level.
 * @lp:		fc lport
 * @fsp:	fc packet
 *
 * This is called by upper layer protocol.
 * Return   : zero for success and -1 for failure
 * Context  : called from queuecommand which can be called from process
 *	      context or scsi soft irq context.
 * Locks    : called with the host lock and irqs disabled.
 */
970 static int fc_fcp_pkt_send(struct fc_lport
*lp
, struct fc_fcp_pkt
*fsp
)
972 struct fc_fcp_internal
*si
= fc_get_scsi_internal(lp
);
975 fsp
->cmd
->SCp
.ptr
= (char *)fsp
;
976 fsp
->cdb_cmd
.fc_dl
= htonl(fsp
->data_len
);
977 fsp
->cdb_cmd
.fc_flags
= fsp
->req_flags
& ~FCP_CFL_LEN_MASK
;
979 int_to_scsilun(fsp
->cmd
->device
->lun
,
980 (struct scsi_lun
*)fsp
->cdb_cmd
.fc_lun
);
981 memcpy(fsp
->cdb_cmd
.fc_cdb
, fsp
->cmd
->cmnd
, fsp
->cmd
->cmd_len
);
982 list_add_tail(&fsp
->list
, &si
->scsi_pkt_queue
);
984 spin_unlock_irq(lp
->host
->host_lock
);
985 rc
= lp
->tt
.fcp_cmd_send(lp
, fsp
, fc_fcp_recv
);
986 spin_lock_irq(lp
->host
->host_lock
);
988 list_del(&fsp
->list
);
993 static int fc_fcp_cmd_send(struct fc_lport
*lp
, struct fc_fcp_pkt
*fsp
,
994 void (*resp
)(struct fc_seq
*,
1000 struct fc_rport
*rport
;
1001 struct fc_rport_libfc_priv
*rp
;
1002 const size_t len
= sizeof(fsp
->cdb_cmd
);
1005 if (fc_fcp_lock_pkt(fsp
))
1008 fp
= fc_frame_alloc(lp
, sizeof(fsp
->cdb_cmd
));
1014 memcpy(fc_frame_payload_get(fp
, len
), &fsp
->cdb_cmd
, len
);
1015 fr_cmd(fp
) = fsp
->cmd
;
1017 fsp
->max_payload
= rport
->maxframe_size
;
1018 rp
= rport
->dd_data
;
1020 fc_fill_fc_hdr(fp
, FC_RCTL_DD_UNSOL_CMD
, rport
->port_id
,
1021 fc_host_port_id(rp
->local_port
->host
), FC_TYPE_FCP
,
1022 FC_FC_FIRST_SEQ
| FC_FC_END_SEQ
| FC_FC_SEQ_INIT
, 0);
1024 seq
= lp
->tt
.exch_seq_send(lp
, fp
, resp
, fc_fcp_pkt_destroy
, fsp
, 0);
1030 fsp
->last_pkt_time
= jiffies
;
1032 fc_fcp_pkt_hold(fsp
); /* hold for fc_fcp_pkt_destroy */
1034 setup_timer(&fsp
->timer
, fc_fcp_timeout
, (unsigned long)fsp
);
1035 fc_fcp_timer_set(fsp
,
1036 (fsp
->tgt_flags
& FC_RP_FLAGS_REC_SUPPORTED
) ?
1037 FC_SCSI_REC_TOV
: FC_SCSI_ER_TIMEOUT
);
1039 fc_fcp_unlock_pkt(fsp
);
1044 * transport error handler
1046 static void fc_fcp_error(struct fc_fcp_pkt
*fsp
, struct fc_frame
*fp
)
1048 int error
= PTR_ERR(fp
);
1050 if (fc_fcp_lock_pkt(fsp
))
1055 fc_fcp_retry_cmd(fsp
);
1058 FC_DBG("unknown error %ld\n", PTR_ERR(fp
));
1061 * clear abort pending, because the lower layer
1062 * decided to force completion.
1064 fsp
->state
&= ~FC_SRB_ABORT_PENDING
;
1065 fsp
->status_code
= FC_CMD_PLOGO
;
1066 fc_fcp_complete_locked(fsp
);
1068 fc_fcp_unlock_pkt(fsp
);
/*
 * SCSI abort handler - sends an abort request
 * and then waits for the abort completion.
 */
1075 static int fc_fcp_pkt_abort(struct fc_lport
*lp
, struct fc_fcp_pkt
*fsp
)
1079 if (fc_fcp_send_abort(fsp
))
1082 init_completion(&fsp
->tm_done
);
1083 fsp
->wait_for_comp
= 1;
1085 spin_unlock_bh(&fsp
->scsi_pkt_lock
);
1086 rc
= wait_for_completion_timeout(&fsp
->tm_done
, FC_SCSI_TM_TOV
);
1087 spin_lock_bh(&fsp
->scsi_pkt_lock
);
1088 fsp
->wait_for_comp
= 0;
1091 FC_DBG("target abort cmd failed\n");
1093 } else if (fsp
->state
& FC_SRB_ABORTED
) {
1094 FC_DBG("target abort cmd passed\n");
1096 fc_fcp_complete_locked(fsp
);
1103 * Retry LUN reset after resource allocation failed.
1105 static void fc_lun_reset_send(unsigned long data
)
1107 struct fc_fcp_pkt
*fsp
= (struct fc_fcp_pkt
*)data
;
1108 struct fc_lport
*lp
= fsp
->lp
;
1109 if (lp
->tt
.fcp_cmd_send(lp
, fsp
, fc_tm_done
)) {
1110 if (fsp
->recov_retry
++ >= FC_MAX_RECOV_RETRY
)
1112 if (fc_fcp_lock_pkt(fsp
))
1114 setup_timer(&fsp
->timer
, fc_lun_reset_send
, (unsigned long)fsp
);
1115 fc_fcp_timer_set(fsp
, FC_SCSI_REC_TOV
);
1116 fc_fcp_unlock_pkt(fsp
);
1121 * Scsi device reset handler- send a LUN RESET to the device
1122 * and wait for reset reply
1124 static int fc_lun_reset(struct fc_lport
*lp
, struct fc_fcp_pkt
*fsp
,
1125 unsigned int id
, unsigned int lun
)
1129 fsp
->cdb_cmd
.fc_dl
= htonl(fsp
->data_len
);
1130 fsp
->cdb_cmd
.fc_tm_flags
= FCP_TMF_LUN_RESET
;
1131 int_to_scsilun(lun
, (struct scsi_lun
*)fsp
->cdb_cmd
.fc_lun
);
1133 fsp
->wait_for_comp
= 1;
1134 init_completion(&fsp
->tm_done
);
1136 fc_lun_reset_send((unsigned long)fsp
);
1139 * wait for completion of reset
1140 * after that make sure all commands are terminated
1142 rc
= wait_for_completion_timeout(&fsp
->tm_done
, FC_SCSI_TM_TOV
);
1144 spin_lock_bh(&fsp
->scsi_pkt_lock
);
1145 fsp
->state
|= FC_SRB_COMPL
;
1146 spin_unlock_bh(&fsp
->scsi_pkt_lock
);
1148 del_timer_sync(&fsp
->timer
);
1150 spin_lock_bh(&fsp
->scsi_pkt_lock
);
1152 lp
->tt
.exch_done(fsp
->seq_ptr
);
1153 fsp
->seq_ptr
= NULL
;
1155 fsp
->wait_for_comp
= 0;
1156 spin_unlock_bh(&fsp
->scsi_pkt_lock
);
1159 FC_DBG("lun reset failed\n");
1163 /* cdb_status holds the tmf's rsp code */
1164 if (fsp
->cdb_status
!= FCP_TMF_CMPL
)
1167 FC_DBG("lun reset to lun %u completed\n", lun
);
1168 fc_fcp_cleanup_each_cmd(lp
, id
, lun
, FC_CMD_ABORTED
);
/*
 * Task Management response handler
 */
1175 static void fc_tm_done(struct fc_seq
*seq
, struct fc_frame
*fp
, void *arg
)
1177 struct fc_fcp_pkt
*fsp
= arg
;
1178 struct fc_frame_header
*fh
;
	/*
	 * If there is an error just let it timeout or wait
	 * for the TMF to be aborted if it timed out.
	 *
	 * scsi-eh will escalate when either happens.
	 */
1190 if (fc_fcp_lock_pkt(fsp
))
1194 * raced with eh timeout handler.
1196 if (!fsp
->seq_ptr
|| !fsp
->wait_for_comp
) {
1197 spin_unlock_bh(&fsp
->scsi_pkt_lock
);
1201 fh
= fc_frame_header_get(fp
);
1202 if (fh
->fh_type
!= FC_TYPE_BLS
)
1203 fc_fcp_resp(fsp
, fp
);
1204 fsp
->seq_ptr
= NULL
;
1205 fsp
->lp
->tt
.exch_done(seq
);
1207 fc_fcp_unlock_pkt(fsp
);
1210 static void fc_fcp_cleanup(struct fc_lport
*lp
)
1212 fc_fcp_cleanup_each_cmd(lp
, -1, -1, FC_ERROR
);
1216 * fc_fcp_timeout: called by OS timer function.
1218 * The timer has been inactivated and must be reactivated if desired
1219 * using fc_fcp_timer_set().
1223 * If REC is supported, just issue it, and return. The REC exchange will
1224 * complete or time out, and recovery can continue at that point.
1226 * Otherwise, if the response has been received without all the data,
1227 * it has been ER_TIMEOUT since the response was received.
1229 * If the response has not been received,
1230 * we see if data was received recently. If it has been, we continue waiting,
1231 * otherwise, we abort the command.
1233 static void fc_fcp_timeout(unsigned long data
)
1235 struct fc_fcp_pkt
*fsp
= (struct fc_fcp_pkt
*)data
;
1236 struct fc_rport
*rport
= fsp
->rport
;
1237 struct fc_rport_libfc_priv
*rp
= rport
->dd_data
;
1239 if (fc_fcp_lock_pkt(fsp
))
1242 if (fsp
->cdb_cmd
.fc_tm_flags
)
1245 fsp
->state
|= FC_SRB_FCP_PROCESSING_TMO
;
1247 if (rp
->flags
& FC_RP_FLAGS_REC_SUPPORTED
)
1249 else if (time_after_eq(fsp
->last_pkt_time
+ (FC_SCSI_ER_TIMEOUT
/ 2),
1251 fc_fcp_timer_set(fsp
, FC_SCSI_ER_TIMEOUT
);
1252 else if (fsp
->state
& FC_SRB_RCV_STATUS
)
1253 fc_fcp_complete_locked(fsp
);
1255 fc_timeout_error(fsp
);
1256 fsp
->state
&= ~FC_SRB_FCP_PROCESSING_TMO
;
1258 fc_fcp_unlock_pkt(fsp
);
1262 * Send a REC ELS request
1264 static void fc_fcp_rec(struct fc_fcp_pkt
*fsp
)
1266 struct fc_lport
*lp
;
1267 struct fc_frame
*fp
;
1268 struct fc_rport
*rport
;
1269 struct fc_rport_libfc_priv
*rp
;
1273 rp
= rport
->dd_data
;
1274 if (!fsp
->seq_ptr
|| rp
->rp_state
!= RPORT_ST_READY
) {
1275 fsp
->status_code
= FC_HRD_ERROR
;
1277 fc_fcp_complete_locked(fsp
);
1280 fp
= fc_frame_alloc(lp
, sizeof(struct fc_els_rec
));
1284 fr_seq(fp
) = fsp
->seq_ptr
;
1285 fc_fill_fc_hdr(fp
, FC_RCTL_ELS_REQ
, rport
->port_id
,
1286 fc_host_port_id(rp
->local_port
->host
), FC_TYPE_ELS
,
1287 FC_FC_FIRST_SEQ
| FC_FC_END_SEQ
| FC_FC_SEQ_INIT
, 0);
1288 if (lp
->tt
.elsct_send(lp
, rport
, fp
, ELS_REC
, fc_fcp_rec_resp
,
1289 fsp
, jiffies_to_msecs(FC_SCSI_REC_TOV
))) {
1290 fc_fcp_pkt_hold(fsp
); /* hold while REC outstanding */
1295 if (fsp
->recov_retry
++ < FC_MAX_RECOV_RETRY
)
1296 fc_fcp_timer_set(fsp
, FC_SCSI_REC_TOV
);
1298 fc_timeout_error(fsp
);
/*
 * Receive handler for REC ELS frame.
 * If it is a reject then let the scsi layer handle the timeout.
 * If it is a LS_ACC then if the io was not completed
 * set the timeout and return, otherwise complete the exchange
 * and tell the scsi layer to restart the I/O.
 */
1308 static void fc_fcp_rec_resp(struct fc_seq
*seq
, struct fc_frame
*fp
, void *arg
)
1310 struct fc_fcp_pkt
*fsp
= (struct fc_fcp_pkt
*)arg
;
1311 struct fc_els_rec_acc
*recp
;
1312 struct fc_els_ls_rjt
*rjt
;
1316 enum dma_data_direction data_dir
;
1318 struct fc_rport_libfc_priv
*rp
;
1321 fc_fcp_rec_error(fsp
, fp
);
1325 if (fc_fcp_lock_pkt(fsp
))
1328 fsp
->recov_retry
= 0;
1329 opcode
= fc_frame_payload_op(fp
);
1330 if (opcode
== ELS_LS_RJT
) {
1331 rjt
= fc_frame_payload_get(fp
, sizeof(*rjt
));
1332 switch (rjt
->er_reason
) {
1334 FC_DEBUG_FCP("device %x unexpected REC reject "
1335 "reason %d expl %d\n",
1336 fsp
->rport
->port_id
, rjt
->er_reason
,
1340 FC_DEBUG_FCP("device does not support REC\n");
1341 rp
= fsp
->rport
->dd_data
;
			/*
			 * if we do not support RECs or got some bogus
			 * reason then re-setup the timer so we check for
			 * making progress.
			 */
1347 rp
->flags
&= ~FC_RP_FLAGS_REC_SUPPORTED
;
1348 fc_fcp_timer_set(fsp
, FC_SCSI_ER_TIMEOUT
);
1353 * If no data transfer, the command frame got dropped
1354 * so we just retry. If data was transferred, we
1355 * lost the response but the target has no record,
1356 * so we abort and retry.
1358 if (rjt
->er_explan
== ELS_EXPL_OXID_RXID
&&
1359 fsp
->xfer_len
== 0) {
1360 fc_fcp_retry_cmd(fsp
);
1363 fc_timeout_error(fsp
);
1366 } else if (opcode
== ELS_LS_ACC
) {
1367 if (fsp
->state
& FC_SRB_ABORTED
)
1370 data_dir
= fsp
->cmd
->sc_data_direction
;
1371 recp
= fc_frame_payload_get(fp
, sizeof(*recp
));
1372 offset
= ntohl(recp
->reca_fc4value
);
1373 e_stat
= ntohl(recp
->reca_e_stat
);
1375 if (e_stat
& ESB_ST_COMPLETE
) {
			/*
			 * The exchange is complete.
			 *
			 * For output, we must've lost the response.
			 * For input, all data must've been sent.
			 * We may have lost the response
			 * (and a confirmation was requested) and maybe
			 * some data.
			 *
			 * If all data was received, send an SRR
			 * asking for the response. If partial data was
			 * received, or there are gaps, the SRR requests
			 * data at the start of the gap.
			 * Recovery via SRR relies on in-order delivery.
			 */
1391 if (data_dir
== DMA_TO_DEVICE
) {
1392 r_ctl
= FC_RCTL_DD_CMD_STATUS
;
1393 } else if (fsp
->xfer_contig_end
== offset
) {
1394 r_ctl
= FC_RCTL_DD_CMD_STATUS
;
1396 offset
= fsp
->xfer_contig_end
;
1397 r_ctl
= FC_RCTL_DD_SOL_DATA
;
1399 fc_fcp_srr(fsp
, r_ctl
, offset
);
1400 } else if (e_stat
& ESB_ST_SEQ_INIT
) {
1403 * The remote port has the initiative, so just
1404 * keep waiting for it to complete.
1406 fc_fcp_timer_set(fsp
, FC_SCSI_REC_TOV
);
1410 * The exchange is incomplete, we have seq. initiative.
1411 * Lost response with requested confirmation,
1412 * lost confirmation, lost transfer ready or
1415 * For output, if not all data was received, ask
1416 * for transfer ready to be repeated.
1418 * If we received or sent all the data, send SRR to
1421 * If we lost a response, we may have lost some read
1424 r_ctl
= FC_RCTL_DD_SOL_DATA
;
1425 if (data_dir
== DMA_TO_DEVICE
) {
1426 r_ctl
= FC_RCTL_DD_CMD_STATUS
;
1427 if (offset
< fsp
->data_len
)
1428 r_ctl
= FC_RCTL_DD_DATA_DESC
;
1429 } else if (offset
== fsp
->xfer_contig_end
) {
1430 r_ctl
= FC_RCTL_DD_CMD_STATUS
;
1431 } else if (fsp
->xfer_contig_end
< offset
) {
1432 offset
= fsp
->xfer_contig_end
;
1434 fc_fcp_srr(fsp
, r_ctl
, offset
);
1438 fc_fcp_unlock_pkt(fsp
);
1440 fc_fcp_pkt_release(fsp
); /* drop hold for outstanding REC */
1445 * Handle error response or timeout for REC exchange.
1447 static void fc_fcp_rec_error(struct fc_fcp_pkt
*fsp
, struct fc_frame
*fp
)
1449 int error
= PTR_ERR(fp
);
1451 if (fc_fcp_lock_pkt(fsp
))
1456 fc_fcp_retry_cmd(fsp
);
1460 FC_DBG("REC %p fid %x error unexpected error %d\n",
1461 fsp
, fsp
->rport
->port_id
, error
);
1462 fsp
->status_code
= FC_CMD_PLOGO
;
1465 case -FC_EX_TIMEOUT
:
1467 * Assume REC or LS_ACC was lost.
1468 * The exchange manager will have aborted REC, so retry.
1470 FC_DBG("REC fid %x error error %d retry %d/%d\n",
1471 fsp
->rport
->port_id
, error
, fsp
->recov_retry
,
1472 FC_MAX_RECOV_RETRY
);
1473 if (fsp
->recov_retry
++ < FC_MAX_RECOV_RETRY
)
1476 fc_timeout_error(fsp
);
1479 fc_fcp_unlock_pkt(fsp
);
1481 fc_fcp_pkt_release(fsp
); /* drop hold for outstanding REC */
/*
 * Timeout error routine:
 * aborts the I/O, closes the exchange and
 * sends a completion notification to the scsi layer.
 */
1489 static void fc_timeout_error(struct fc_fcp_pkt
*fsp
)
1491 fsp
->status_code
= FC_CMD_TIME_OUT
;
1492 fsp
->cdb_status
= 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
1498 fc_fcp_send_abort(fsp
);
1502 * Sequence retransmission request.
1503 * This is called after receiving status but insufficient data, or
1504 * when expecting status but the request has timed out.
1506 static void fc_fcp_srr(struct fc_fcp_pkt
*fsp
, enum fc_rctl r_ctl
, u32 offset
)
1508 struct fc_lport
*lp
= fsp
->lp
;
1509 struct fc_rport
*rport
;
1510 struct fc_rport_libfc_priv
*rp
;
1511 struct fc_exch
*ep
= fc_seq_exch(fsp
->seq_ptr
);
1513 struct fcp_srr
*srr
;
1514 struct fc_frame
*fp
;
1518 rp
= rport
->dd_data
;
1519 cdb_op
= fsp
->cdb_cmd
.fc_cdb
[0];
1521 if (!(rp
->flags
& FC_RP_FLAGS_RETRY
) || rp
->rp_state
!= RPORT_ST_READY
)
1522 goto retry
; /* shouldn't happen */
1523 fp
= fc_frame_alloc(lp
, sizeof(*srr
));
1527 srr
= fc_frame_payload_get(fp
, sizeof(*srr
));
1528 memset(srr
, 0, sizeof(*srr
));
1529 srr
->srr_op
= ELS_SRR
;
1530 srr
->srr_ox_id
= htons(ep
->oxid
);
1531 srr
->srr_rx_id
= htons(ep
->rxid
);
1532 srr
->srr_r_ctl
= r_ctl
;
1533 srr
->srr_rel_off
= htonl(offset
);
1535 fc_fill_fc_hdr(fp
, FC_RCTL_ELS4_REQ
, rport
->port_id
,
1536 fc_host_port_id(rp
->local_port
->host
), FC_TYPE_FCP
,
1537 FC_FC_FIRST_SEQ
| FC_FC_END_SEQ
| FC_FC_SEQ_INIT
, 0);
1539 seq
= lp
->tt
.exch_seq_send(lp
, fp
, fc_fcp_srr_resp
, NULL
,
1540 fsp
, jiffies_to_msecs(FC_SCSI_REC_TOV
));
1545 fsp
->recov_seq
= seq
;
1546 fsp
->xfer_len
= offset
;
1547 fsp
->xfer_contig_end
= offset
;
1548 fsp
->state
&= ~FC_SRB_RCV_STATUS
;
1549 fc_fcp_pkt_hold(fsp
); /* hold for outstanding SRR */
1552 fc_fcp_retry_cmd(fsp
);
1556 * Handle response from SRR.
1558 static void fc_fcp_srr_resp(struct fc_seq
*seq
, struct fc_frame
*fp
, void *arg
)
1560 struct fc_fcp_pkt
*fsp
= arg
;
1561 struct fc_frame_header
*fh
;
1564 fc_fcp_srr_error(fsp
, fp
);
1568 if (fc_fcp_lock_pkt(fsp
))
1571 fh
= fc_frame_header_get(fp
);
1573 * BUG? fc_fcp_srr_error calls exch_done which would release
1574 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
1575 * then fc_exch_timeout would be sending an abort. The exch_done
1576 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
1577 * an abort response though.
1579 if (fh
->fh_type
== FC_TYPE_BLS
) {
1580 fc_fcp_unlock_pkt(fsp
);
1584 fsp
->recov_seq
= NULL
;
1585 switch (fc_frame_payload_op(fp
)) {
1587 fsp
->recov_retry
= 0;
1588 fc_fcp_timer_set(fsp
, FC_SCSI_REC_TOV
);
1592 fc_timeout_error(fsp
);
1595 fc_fcp_unlock_pkt(fsp
);
1596 fsp
->lp
->tt
.exch_done(seq
);
1599 fc_fcp_pkt_release(fsp
); /* drop hold for outstanding SRR */
1602 static void fc_fcp_srr_error(struct fc_fcp_pkt
*fsp
, struct fc_frame
*fp
)
1604 if (fc_fcp_lock_pkt(fsp
))
1606 fsp
->lp
->tt
.exch_done(fsp
->recov_seq
);
1607 fsp
->recov_seq
= NULL
;
1608 switch (PTR_ERR(fp
)) {
1609 case -FC_EX_TIMEOUT
:
1610 if (fsp
->recov_retry
++ < FC_MAX_RECOV_RETRY
)
1613 fc_timeout_error(fsp
);
1615 case -FC_EX_CLOSED
: /* e.g., link failure */
1618 fc_fcp_retry_cmd(fsp
);
1621 fc_fcp_unlock_pkt(fsp
);
1623 fc_fcp_pkt_release(fsp
); /* drop hold for outstanding SRR */
1626 static inline int fc_fcp_lport_queue_ready(struct fc_lport
*lp
)
1629 return (lp
->state
== LPORT_ST_READY
) && lp
->link_up
&& !lp
->qfull
;
/**
 * fc_queuecommand() - The queuecommand function of the scsi template
 * @cmd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmd is completed
 *
 * This is the i/o strategy routine, called by the scsi layer.
 * This routine is called while holding the host_lock.
 */
1640 int fc_queuecommand(struct scsi_cmnd
*sc_cmd
, void (*done
)(struct scsi_cmnd
*))
1642 struct fc_lport
*lp
;
1643 struct fc_rport
*rport
= starget_to_rport(scsi_target(sc_cmd
->device
));
1644 struct fc_fcp_pkt
*fsp
;
1645 struct fc_rport_libfc_priv
*rp
;
1648 struct fcoe_dev_stats
*stats
;
1650 lp
= shost_priv(sc_cmd
->device
->host
);
1652 rval
= fc_remote_port_chkready(rport
);
1654 sc_cmd
->result
= rval
;
1659 if (!*(struct fc_remote_port
**)rport
->dd_data
) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
1664 sc_cmd
->result
= DID_IMM_RETRY
<< 16;
1669 rp
= rport
->dd_data
;
1671 if (!fc_fcp_lport_queue_ready(lp
)) {
1672 rc
= SCSI_MLQUEUE_HOST_BUSY
;
1676 fsp
= fc_fcp_pkt_alloc(lp
, GFP_ATOMIC
);
1678 rc
= SCSI_MLQUEUE_HOST_BUSY
;
1683 * build the libfc request pkt
1685 fsp
->cmd
= sc_cmd
; /* save the cmd */
1686 fsp
->lp
= lp
; /* save the softc ptr */
1687 fsp
->rport
= rport
; /* set the remote port ptr */
1688 sc_cmd
->scsi_done
= done
;
1691 * set up the transfer length
1693 fsp
->data_len
= scsi_bufflen(sc_cmd
);
1697 * setup the data direction
1699 stats
= lp
->dev_stats
[smp_processor_id()];
1700 if (sc_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
1701 fsp
->req_flags
= FC_SRB_READ
;
1702 stats
->InputRequests
++;
1703 stats
->InputMegabytes
= fsp
->data_len
;
1704 } else if (sc_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
1705 fsp
->req_flags
= FC_SRB_WRITE
;
1706 stats
->OutputRequests
++;
1707 stats
->OutputMegabytes
= fsp
->data_len
;
1710 stats
->ControlRequests
++;
1713 fsp
->tgt_flags
= rp
->flags
;
1715 init_timer(&fsp
->timer
);
1716 fsp
->timer
.data
= (unsigned long)fsp
;
	/*
	 * send it to the lower layer
	 * if we get -1 return then put the request in the pending
	 * queue.
	 */
1723 rval
= fc_fcp_pkt_send(lp
, fsp
);
1725 fsp
->state
= FC_SRB_FREE
;
1726 fc_fcp_pkt_release(fsp
);
1727 rc
= SCSI_MLQUEUE_HOST_BUSY
;
1732 EXPORT_SYMBOL(fc_queuecommand
);
/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp:	scsi packet
 *
 * Translates an error to a Linux SCSI error.
 *
 * The fcp packet lock must be held when calling.
 */
1742 static void fc_io_compl(struct fc_fcp_pkt
*fsp
)
1744 struct fc_fcp_internal
*si
;
1745 struct scsi_cmnd
*sc_cmd
;
1746 struct fc_lport
*lp
;
1747 unsigned long flags
;
1749 fsp
->state
|= FC_SRB_COMPL
;
1750 if (!(fsp
->state
& FC_SRB_FCP_PROCESSING_TMO
)) {
1751 spin_unlock_bh(&fsp
->scsi_pkt_lock
);
1752 del_timer_sync(&fsp
->timer
);
1753 spin_lock_bh(&fsp
->scsi_pkt_lock
);
1757 si
= fc_get_scsi_internal(lp
);
1758 spin_lock_irqsave(lp
->host
->host_lock
, flags
);
1760 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
	/*
	 * if a command timed out while we had to try and throttle IO
	 * and it is now getting cleaned up, then we are about to
	 * try again so clear the throttled flag in case we get more
	 * memory errors.
	 */
1770 if (si
->throttled
&& fsp
->state
& FC_SRB_NOMEM
)
1776 if (!sc_cmd
->SCp
.ptr
) {
1777 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
1781 CMD_SCSI_STATUS(sc_cmd
) = fsp
->cdb_status
;
1782 switch (fsp
->status_code
) {
1784 if (fsp
->cdb_status
== 0) {
1788 sc_cmd
->result
= DID_OK
<< 16;
1789 if (fsp
->scsi_resid
)
1790 CMD_RESID_LEN(sc_cmd
) = fsp
->scsi_resid
;
1791 } else if (fsp
->cdb_status
== QUEUE_FULL
) {
1792 struct scsi_device
*tmp_sdev
;
1793 struct scsi_device
*sdev
= sc_cmd
->device
;
1795 shost_for_each_device(tmp_sdev
, sdev
->host
) {
1796 if (tmp_sdev
->id
!= sdev
->id
)
1799 if (tmp_sdev
->queue_depth
> 1) {
1800 scsi_track_queue_full(tmp_sdev
,
1805 sc_cmd
->result
= (DID_OK
<< 16) | fsp
->cdb_status
;
1808 * transport level I/O was ok but scsi
1809 * has non zero status
1811 sc_cmd
->result
= (DID_OK
<< 16) | fsp
->cdb_status
;
1815 sc_cmd
->result
= DID_ERROR
<< 16;
1817 case FC_DATA_UNDRUN
:
1818 if ((fsp
->cdb_status
== 0) && !(fsp
->req_flags
& FC_SRB_READ
)) {
1820 * scsi status is good but transport level
1823 sc_cmd
->result
= DID_OK
<< 16;
1826 * scsi got underrun, this is an error
1828 CMD_RESID_LEN(sc_cmd
) = fsp
->scsi_resid
;
1829 sc_cmd
->result
= (DID_ERROR
<< 16) | fsp
->cdb_status
;
1832 case FC_DATA_OVRRUN
:
1834 * overrun is an error
1836 sc_cmd
->result
= (DID_ERROR
<< 16) | fsp
->cdb_status
;
1838 case FC_CMD_ABORTED
:
1839 sc_cmd
->result
= (DID_ABORT
<< 16) | fsp
->io_status
;
1841 case FC_CMD_TIME_OUT
:
1842 sc_cmd
->result
= (DID_BUS_BUSY
<< 16) | fsp
->io_status
;
1845 sc_cmd
->result
= (DID_RESET
<< 16);
1848 sc_cmd
->result
= (DID_NO_CONNECT
<< 16);
1851 sc_cmd
->result
= (DID_ERROR
<< 16);
1855 list_del(&fsp
->list
);
1856 sc_cmd
->SCp
.ptr
= NULL
;
1857 sc_cmd
->scsi_done(sc_cmd
);
1858 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
1860 /* release ref from initial allocation in queue command */
1861 fc_fcp_pkt_release(fsp
);
1865 * fc_fcp_complete() - complete processing of a fcp packet
1868 * This function may sleep if a fsp timer is pending.
1869 * The host lock must not be held by caller.
1871 void fc_fcp_complete(struct fc_fcp_pkt
*fsp
)
1873 if (fc_fcp_lock_pkt(fsp
))
1876 fc_fcp_complete_locked(fsp
);
1877 fc_fcp_unlock_pkt(fsp
);
1879 EXPORT_SYMBOL(fc_fcp_complete
);
1882 * fc_eh_abort() - Abort a command
1883 * @sc_cmd: scsi command to abort
1885 * From scsi host template.
1886 * send ABTS to the target device and wait for the response
1887 * sc_cmd is the pointer to the command to be aborted.
1889 int fc_eh_abort(struct scsi_cmnd
*sc_cmd
)
1891 struct fc_fcp_pkt
*fsp
;
1892 struct fc_lport
*lp
;
1894 unsigned long flags
;
1896 lp
= shost_priv(sc_cmd
->device
->host
);
1897 if (lp
->state
!= LPORT_ST_READY
)
1899 else if (!lp
->link_up
)
1902 spin_lock_irqsave(lp
->host
->host_lock
, flags
);
1903 fsp
= CMD_SP(sc_cmd
);
1905 /* command completed while scsi eh was setting up */
1906 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
1910 fc_fcp_pkt_hold(fsp
);
1911 spin_unlock_irqrestore(lp
->host
->host_lock
, flags
);
1913 if (fc_fcp_lock_pkt(fsp
)) {
1914 /* completed while we were waiting for timer to be deleted */
1919 rc
= fc_fcp_pkt_abort(lp
, fsp
);
1920 fc_fcp_unlock_pkt(fsp
);
1923 fc_fcp_pkt_release(fsp
);
1926 EXPORT_SYMBOL(fc_eh_abort
);
/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd:	scsi command
 *
 * Set from the scsi host template to send a tm cmd to the target and wait
 * for the response.
 */
1935 int fc_eh_device_reset(struct scsi_cmnd
*sc_cmd
)
1937 struct fc_lport
*lp
;
1938 struct fc_fcp_pkt
*fsp
;
1939 struct fc_rport
*rport
= starget_to_rport(scsi_target(sc_cmd
->device
));
1941 struct fc_rport_libfc_priv
*rp
;
1944 rval
= fc_remote_port_chkready(rport
);
1948 rp
= rport
->dd_data
;
1949 lp
= shost_priv(sc_cmd
->device
->host
);
1951 if (lp
->state
!= LPORT_ST_READY
)
1954 fsp
= fc_fcp_pkt_alloc(lp
, GFP_NOIO
);
1956 FC_DBG("could not allocate scsi_pkt\n");
1957 sc_cmd
->result
= DID_NO_CONNECT
<< 16;
1962 * Build the libfc request pkt. Do not set the scsi cmnd, because
1963 * the sc passed in is not setup for execution like when sent
1964 * through the queuecommand callout.
1966 fsp
->lp
= lp
; /* save the softc ptr */
1967 fsp
->rport
= rport
; /* set the remote port ptr */
1970 * flush outstanding commands
1972 rc
= fc_lun_reset(lp
, fsp
, scmd_id(sc_cmd
), sc_cmd
->device
->lun
);
1973 fsp
->state
= FC_SRB_FREE
;
1974 fc_fcp_pkt_release(fsp
);
1979 EXPORT_SYMBOL(fc_eh_device_reset
);
1982 * fc_eh_host_reset() - The reset function will reset the ports on the host.
1983 * @sc_cmd: scsi command
1985 int fc_eh_host_reset(struct scsi_cmnd
*sc_cmd
)
1987 struct Scsi_Host
*shost
= sc_cmd
->device
->host
;
1988 struct fc_lport
*lp
= shost_priv(shost
);
1989 unsigned long wait_tmo
;
1991 lp
->tt
.lport_reset(lp
);
1992 wait_tmo
= jiffies
+ FC_HOST_RESET_TIMEOUT
;
1993 while (!fc_fcp_lport_queue_ready(lp
) && time_before(jiffies
, wait_tmo
))
1996 if (fc_fcp_lport_queue_ready(lp
)) {
1997 shost_printk(KERN_INFO
, shost
, "Host reset succeeded.\n");
2000 shost_printk(KERN_INFO
, shost
, "Host reset failed. "
2001 "lport not ready.\n");
2005 EXPORT_SYMBOL(fc_eh_host_reset
);
/**
 * fc_slave_alloc() - configure queue depth
 * @sdev:	scsi device
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
2014 int fc_slave_alloc(struct scsi_device
*sdev
)
2016 struct fc_rport
*rport
= starget_to_rport(scsi_target(sdev
));
2019 if (!rport
|| fc_remote_port_chkready(rport
))
2022 if (sdev
->tagged_supported
) {
2023 if (sdev
->host
->hostt
->cmd_per_lun
)
2024 queue_depth
= sdev
->host
->hostt
->cmd_per_lun
;
2026 queue_depth
= FC_FCP_DFLT_QUEUE_DEPTH
;
2027 scsi_activate_tcq(sdev
, queue_depth
);
2031 EXPORT_SYMBOL(fc_slave_alloc
);
int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);
int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);
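/*
 * How an LLD typically wires the exported handlers above into its SCSI host
 * template (illustrative sketch only; field values such as .can_queue are
 * made up here, and the real fcoe template sets several more fields):
 *
 *	static struct scsi_host_template example_sht = {
 *		.module			= THIS_MODULE,
 *		.queuecommand		= fc_queuecommand,
 *		.eh_abort_handler	= fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	= fc_eh_host_reset,
 *		.slave_alloc		= fc_slave_alloc,
 *		.change_queue_depth	= fc_change_queue_depth,
 *		.change_queue_type	= fc_change_queue_type,
 *		.can_queue		= 1024,
 *		.sg_tablesize		= SG_ALL,
 *	};
 */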
void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "Leaked scsi packets.\n");

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);
int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		FC_DBG("Unable to allocate SRB cache...module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_exch_mgr;

	return rc;

destroy_exch_mgr:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}

static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
}

module_init(libfc_init);
module_exit(libfc_exit);