/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL");
static int fc_fcp_debug;

#define FC_DEBUG_FCP(fmt...)			\
	do {					\
		if (fc_fcp_debug)		\
			FC_DBG(fmt);		\
	} while (0)
static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */
#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)
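/*
 * Note: these read/write flags line up with the FCP_CFL_RDDATA and
 * FCP_CFL_WRDATA bits of the FCP_CMND flags byte, which is why
 * fc_fcp_pkt_send() can copy req_flags, masked with ~FCP_CFL_LEN_MASK,
 * straight into cdb_cmd.fc_flags.
 */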
/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)
struct fc_fcp_internal {
	mempool_t	*scsi_pkt_pool;
	struct list_head scsi_pkt_queue;
	u8		throttled;
};
#define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)
/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_TIME_OUT		10
/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32
/**
 * fc_fcp_pkt_alloc() - allocation routine for scsi_pkt packet
 * @lp:		fc lport struct
 * @gfp:	gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or null on allocation failure.
 * Context	: call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lp;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}
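/*
 * Reference counting note: the initial reference taken above is dropped by
 * fc_io_compl() once the command is handed back to scsi-ml; fc_fcp_pkt_hold()
 * takes additional references while an exchange, REC or SRR still refers to
 * the packet.
 */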
/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp:	fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context	: call from process and interrupt context.
 *		  no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}
/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq:	exchange sequence
 * @fsp:	fcp packet struct
 *
 * Release the hold on the scsi_pkt packet that was taken to keep it
 * around until the EM layer exchange resource is freed.
 * Context	: called from the EM layer.
 *		  no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}
/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp:	fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}
/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}
/**
 * fc_fcp_ddp_setup() - calls to LLD's ddp_setup to set up DDP
 *			transfer for a read I/O indicated by the fc_fcp_pkt.
 * @fsp: ptr to the fc_fcp_pkt
 * @xid: the exchange id for this packet
 *
 * This is called in exch_seq_send() when we have a newly allocated
 * exchange with a valid exchange id to setup ddp.
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	lp = fsp->lp;
	if ((fsp->req_flags & FC_SRB_READ) &&
	    (lp->lro_enabled) && (lp->tt.ddp_setup)) {
		if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
				     scsi_sg_count(fsp->cmd)))
			fsp->xfer_ddp = xid;
	}
}
EXPORT_SYMBOL(fc_fcp_ddp_setup);
/**
 * fc_fcp_ddp_done() - calls to LLD's ddp_done to release any
 *		       DDP related resources for this I/O if it is
 *		       initialized as a ddp transfer
 * @fsp: ptr to the fc_fcp_pkt
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	lp = fsp->lp;
	if (fsp->xfer_ddp && lp->tt.ddp_done) {
		fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
		fsp->xfer_ddp = 0;
	}
}
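/*
 * DDP lifetime: fc_fcp_ddp_setup() runs once the exchange id is known, and
 * fc_fcp_ddp_done() must run before the received length is inspected (in
 * fc_fcp_recv_data/fc_fcp_resp) or the command completes, since the LLD
 * only reports the directly-placed byte count when the context is released.
 */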
/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lp = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	size_t remaining;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	/* if this I/O is ddped, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_DEBUG_FCP("data received past end. len %zx offset %zx "
			     "data_len %x\n", len, offset, fsp->data_len);
		fc_fcp_retry_cmd(fsp);
		return;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	crc = 0;
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
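	/*
	 * The FC frame CRC covers the frame header as well as the payload,
	 * so when the LLD left the CRC unchecked the running CRC is seeded
	 * with the header above and each payload fragment is accumulated
	 * into it in the copy loop below.
	 */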
	sg = scsi_sglist(sc);
	remaining = len;

	while (remaining > 0 && sg) {
		size_t off;
		void *page_addr;
		size_t sg_bytes;

		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		sg_bytes = min(remaining, sg->length - offset);

		/*
		 * The scatterlist item may be bigger than PAGE_SIZE,
		 * but we are limited to mapping PAGE_SIZE at a time.
		 */
		off = offset + sg->offset;
		sg_bytes = min(sg_bytes, (size_t)
			       (PAGE_SIZE - (off & ~PAGE_MASK)));
		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
					KM_SOFTIRQ0);
		if (!page_addr)
			break;		/* XXX panic? */

		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
			crc = crc32(crc, buf, sg_bytes);
		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
		       sg_bytes);

		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		buf += sg_bytes;
		offset += sg_bytes;
		remaining -= sg_bytes;
		copy_len += sg_bytes;
	}

	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		buf = fc_frame_payload_get(fp, 0);
		if (len % 4) {
			crc = crc32(crc, buf + len, 4 - (len % 4));
			len += 4 - (len % 4);
		}

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = lp->dev_stats[smp_processor_id()];
			stats->ErrorFrames++;
			if (stats->InvalidCRCCount++ < 5)
				FC_DBG("CRC error on data frame\n");
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				fc_fcp_retry_cmd(fsp);
			return;
		}
	}

	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
}
/**
 * fc_fcp_send_data() - Send SCSI data to target.
 * @fsp: ptr to fc_fcp_pkt
 * @seq: ptr to this sequence
 * @offset: starting offset for this data request
 * @seq_blen: the burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of seq offload then send down seq_blen
 * size of data in a single frame, otherwise send multiple FC
 * frames of the max FC frame payload supported by the target port.
 *
 * Returns : 0 for success.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
			    size_t offset, size_t seq_blen)
{
	struct fc_exch *ep;
	struct scsi_cmnd *sc;
	struct scatterlist *sg;
	struct fc_frame *fp = NULL;
	struct fc_lport *lp = fsp->lp;
	size_t remaining;
	size_t t_blen;
	size_t tlen;
	size_t sg_bytes;
	size_t frame_offset, fh_parm_offset;
	int error;
	void *data = NULL;
	void *page_addr;
	int using_sg = lp->sg_supp;
	u32 f_ctl;

	WARN_ON(seq_blen <= 0);
	if (unlikely(offset + seq_blen > fsp->data_len)) {
		/* this should never happen */
		FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
			     seq_blen, offset);
		fc_fcp_send_abort(fsp);
		return 0;
	} else if (offset != fsp->xfer_len) {
		/* Out of Order Data Request - no problem, but unexpected. */
		FC_DEBUG_FCP("xfer-ready non-contiguous. "
			     "seq_blen %zx offset %zx\n", seq_blen, offset);
	}

	/*
	 * if LLD is capable of seq_offload then set transport
	 * burst length (t_blen) to seq_blen, otherwise set t_blen
	 * to max FC frame payload previously set in fsp->max_payload.
	 */
	t_blen = fsp->max_payload;
	if (lp->seq_offload) {
		t_blen = min(seq_blen, (size_t)lp->lso_max);
		FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
			     fsp, seq_blen, lp->lso_max, t_blen);
	}

	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
	t_blen &= ~(512 - 1);	/* round down to block size */
	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
	sc = fsp->cmd;

	remaining = seq_blen;
	fh_parm_offset = frame_offset = offset;
	tlen = 0;
	seq = lp->tt.seq_start_next(seq);
	f_ctl = FC_FC_REL_OFF;
	WARN_ON(!seq);

	/*
	 * If a get_page()/put_page() will fail, don't use sg lists
	 * in the fc_frame structure.
	 *
	 * The put_page() may be long after the I/O has completed
	 * in the case of FCoE, since the network driver does it
	 * via free_skb().  See the test in free_pages_check().
	 *
	 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
	 */
	if (using_sg) {
		for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
			if (page_count(sg_page(sg)) == 0 ||
			    (sg_page(sg)->flags & (1 << PG_lru |
						   1 << PG_private |
						   1 << PG_locked |
						   1 << PG_active |
						   1 << PG_slab |
						   1 << PG_swapcache |
						   1 << PG_writeback |
						   1 << PG_reserved |
						   1 << PG_buddy))) {
				using_sg = 0;
				break;
			}
		}
	}
	sg = scsi_sglist(sc);
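	/*
	 * From here on, each frame is built either by attaching page
	 * references to the skb (using_sg still set above), or, when any
	 * page in the scatterlist cannot safely be refcounted, by copying
	 * the data into a linear frame allocation.
	 */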
	while (remaining > 0 && sg) {
		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		if (!fp) {
			tlen = min(t_blen, remaining);

			/*
			 * TODO.  Temporary workaround.	 fc_seq_send() can't
			 * handle odd lengths in non-linear skbs.
			 * This will be the final fragment only.
			 */
			if (tlen % 4)
				using_sg = 0;
			if (using_sg) {
				fp = _fc_frame_alloc(lp, 0);
				if (!fp)
					return -ENOMEM;
			} else {
				fp = fc_frame_alloc(lp, tlen);
				if (!fp)
					return -ENOMEM;

				data = (void *)(fr_hdr(fp)) +
					sizeof(struct fc_frame_header);
			}
			fh_parm_offset = frame_offset;
			fr_max_payload(fp) = fsp->max_payload;
		}
		sg_bytes = min(tlen, sg->length - offset);
		if (using_sg) {
			WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
				FC_FRAME_SG_LEN);
			get_page(sg_page(sg));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   sg_page(sg), sg->offset + offset,
					   sg_bytes);
			fp_skb(fp)->data_len += sg_bytes;
			fr_len(fp) += sg_bytes;
			fp_skb(fp)->truesize += PAGE_SIZE;
		} else {
			size_t off = offset + sg->offset;

			/*
			 * The scatterlist item may be bigger than PAGE_SIZE,
			 * but we must not cross pages inside the kmap.
			 */
			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
							   (off & ~PAGE_MASK)));
			page_addr = kmap_atomic(sg_page(sg) +
						(off >> PAGE_SHIFT),
						KM_SOFTIRQ0);
			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
			       sg_bytes);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			data += sg_bytes;
		}
		offset += sg_bytes;
		frame_offset += sg_bytes;
		tlen -= sg_bytes;
		remaining -= sg_bytes;

		if (tlen)
			continue;

		/*
		 * Send sequence with transfer sequence initiative in case
		 * this is last FCP frame of the sequence.
		 */
		if (remaining == 0)
			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

		ep = fc_seq_exch(seq);
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_parm_offset);

		/*
		 * send fragment using this sequence.
		 */
		error = lp->tt.seq_send(lp, seq, fp);
		if (error) {
			WARN_ON(1);		/* send error should be rare */
			fc_fcp_retry_cmd(fsp);
			return 0;
		}
		fp = NULL;
	}
	fsp->xfer_len += seq_blen;	/* premature count? */
	return 0;
}
static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int ba_done = 1;
	struct fc_ba_rjt *brp;
	struct fc_frame_header *fh;

	fh = fc_frame_header_get(fp);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		break;
	case FC_RCTL_BA_RJT:
		brp = fc_frame_payload_get(fp, sizeof(*brp));
		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
			break;
		/* fall thru */
	default:
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore cleared the ba_done flag.
		 */
		ba_done = 0;
	}

	if (ba_done) {
		fsp->state |= FC_SRB_ABORTED;
		fsp->state &= ~FC_SRB_ABORT_PENDING;

		if (fsp->wait_for_comp)
			complete(&fsp->tm_done);
		else
			fc_fcp_complete_locked(fsp);
	}
}
/**
 * fc_fcp_reduce_can_queue() - drop can_queue
 * @lp: lport to drop queueing for
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * on all reserved structs.
 */
static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	unsigned long flags;
	int can_queue;

	spin_lock_irqsave(lp->host->host_lock, flags);
	if (si->throttled)
		goto done;
	si->throttled = 1;

	can_queue = lp->host->can_queue;
	can_queue >>= 3;
	if (!can_queue)
		can_queue = 1;
	lp->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
		     "Reducing can_queue to %d.\n", can_queue);
done:
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}
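/*
 * The throttled flag set above is cleared again in fc_io_compl() when a
 * command that was dropped for lack of memory (FC_SRB_NOMEM) finishes
 * cleanup, so a later allocation failure can shrink can_queue further.
 */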
/**
 * fc_fcp_recv() - Receive FCP frames
 * @seq: The sequence the frame is on
 * @fp: The received frame
 * @arg: The related FCP packet
 *
 * Return   : None
 * Context  : called from Soft IRQ context
 *	      must not be called while holding the packet lock
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_lport *lp;
	struct fc_frame_header *fh;
	struct fcp_txrdy *dd;
	u8 r_ctl;
	int rc = 0;

	if (IS_ERR(fp))
		goto errout;

	fh = fc_frame_header_get(fp);
	r_ctl = fh->fh_r_ctl;
	lp = fsp->lp;

	if (!(lp->state & LPORT_ST_READY))
		goto out;
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->last_pkt_time = jiffies;

	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_abts_resp(fsp, fp);
		goto unlock;
	}

	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
		goto unlock;

	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
		/*
		 * received XFER RDY from the target
		 * need to send data to the target
		 */
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
		dd = fc_frame_payload_get(fp, sizeof(*dd));
		WARN_ON(!dd);

		rc = fc_fcp_send_data(fsp, seq,
				      (size_t) ntohl(dd->ft_data_ro),
				      (size_t) ntohl(dd->ft_burst_len));
		if (!rc)
			seq->rec_data = fsp->xfer_len;
		else if (rc == -ENOMEM)
			fsp->state |= FC_SRB_NOMEM;
	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
		/*
		 * received a DATA frame
		 * next we will copy the data to the system buffer
		 */
		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
		fc_fcp_recv_data(fsp, fp);
		seq->rec_data = fsp->xfer_contig_end;
	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

		fc_fcp_resp(fsp, fp);
	} else {
		FC_DBG("unexpected frame.  r_ctl %x\n", r_ctl);
	}
unlock:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_frame_free(fp);
errout:
	if (IS_ERR(fp))
		fc_fcp_error(fsp, fp);
	else if (rc == -ENOMEM)
		fc_fcp_reduce_can_queue(lp);
}
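/*
 * seq->rec_data above records how far the transfer has progressed (the
 * relative offset sent for writes, the contiguous end for reads); it is
 * the FC-4 value reported in REC responses, which is what lets recovery
 * resume from that point.
 */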
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fcp_resp *fc_rp;
	struct fcp_resp_ext *rp_ex;
	struct fcp_resp_rsp_info *fc_rp_info;
	u32 plen;
	u32 expected_len;
	u32 respl = 0;
	u32 snsl = 0;
	u8 flags = 0;

	plen = fr_len(fp);
	fh = (struct fc_frame_header *)fr_hdr(fp);
	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
		goto len_err;
	plen -= sizeof(*fh);
	fc_rp = (struct fcp_resp *)(fh + 1);
	fsp->cdb_status = fc_rp->fr_status;
	flags = fc_rp->fr_flags;
	fsp->scsi_comp_flags = flags;
	expected_len = fsp->data_len;

	/* if ddp, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
		rp_ex = (void *)(fc_rp + 1);
		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
				goto len_err;
			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
			if (flags & FCP_RSP_LEN_VAL) {
				respl = ntohl(rp_ex->fr_rsp_len);
				if (respl != sizeof(*fc_rp_info))
					goto len_err;
				if (fsp->wait_for_comp) {
					/* Abuse cdb_status for rsp code */
					fsp->cdb_status = fc_rp_info->rsp_code;
					complete(&fsp->tm_done);
				}
				/*
				 * tmfs will not have any scsi cmd so
				 * exit here
				 */
				return;
			}
			if (flags & FCP_SNS_LEN_VAL) {
				snsl = ntohl(rp_ex->fr_sns_len);
				if (snsl > SCSI_SENSE_BUFFERSIZE)
					snsl = SCSI_SENSE_BUFFERSIZE;
				memcpy(fsp->cmd->sense_buffer,
				       (char *)fc_rp_info + respl, snsl);
			}
		}
		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
				goto len_err;
			if (flags & FCP_RESID_UNDER) {
				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
				/*
				 * The cmnd->underflow is the minimum number of
				 * bytes that must be transferred for this
				 * command.  Provided a sense condition is not
				 * present, make sure the actual amount
				 * transferred is at least the underflow value
				 * or fail.
				 */
				if (!(flags & FCP_SNS_LEN_VAL) &&
				    (fc_rp->fr_status == 0) &&
				    (scsi_bufflen(fsp->cmd) -
				     fsp->scsi_resid) < fsp->cmd->underflow)
					goto err;
				expected_len -= fsp->scsi_resid;
			} else {
				fsp->status_code = FC_ERROR;
			}
		}
	}
	fsp->state |= FC_SRB_RCV_STATUS;

	/*
	 * Check for missing or extra data frames.
	 */
	if (unlikely(fsp->xfer_len != expected_len)) {
		if (fsp->xfer_len < expected_len) {
			/*
			 * Some data may be queued locally.
			 * Wait at least one jiffy to see if it is delivered.
			 * If this expires without data, we may do SRR.
			 */
			fc_fcp_timer_set(fsp, 2);
			return;
		}
		fsp->status_code = FC_DATA_OVRRUN;
		FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
		       "data len %x\n",
		       fsp->rport->port_id,
		       fsp->xfer_len, expected_len, fsp->data_len);
	}
	fc_fcp_complete_locked(fsp);
	return;

len_err:
	FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
	       flags, fr_len(fp), respl, snsl);
err:
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}
/**
 * fc_fcp_complete_locked() - complete processing of a fcp packet
 * @fsp:	fcp packet
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_seq *seq;
	struct fc_exch *ep;
	u32 f_ctl;

	if (fsp->state & FC_SRB_ABORT_PENDING)
		return;

	if (fsp->state & FC_SRB_ABORTED) {
		if (!fsp->status_code)
			fsp->status_code = FC_CMD_ABORTED;
	} else {
		/*
		 * Test for transport underrun, independent of response
		 * underrun status.
		 */
		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
			fsp->status_code = FC_DATA_UNDRUN;
			fsp->io_status = 0;
		}
	}

	seq = fsp->seq_ptr;
	if (seq) {
		fsp->seq_ptr = NULL;
		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
			struct fc_frame *conf_frame;
			struct fc_seq *csp;

			csp = lp->tt.seq_start_next(seq);
			conf_frame = fc_frame_alloc(fsp->lp, 0);
			if (conf_frame) {
				f_ctl = FC_FC_SEQ_INIT;
				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
				ep = fc_seq_exch(seq);
				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
					       ep->did, ep->sid,
					       FC_TYPE_FCP, f_ctl, 0);
				lp->tt.seq_send(lp, csp, conf_frame);
			}
		}
		lp->tt.exch_done(seq);
	}
	fc_io_compl(fsp);
}
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
	struct fc_lport *lp = fsp->lp;

	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->status_code = error;
}
/**
 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
 * @lp:		fc lport
 * @id:		target id
 * @lun:	lun
 * @error:	fsp status code
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
				    unsigned int lun, int error)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;
	struct scsi_cmnd *sc_cmd;
	unsigned long flags;

	spin_lock_irqsave(lp->host->host_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		sc_cmd = fsp->cmd;
		if (id != -1 && scmd_id(sc_cmd) != id)
			continue;

		if (lun != -1 && sc_cmd->device->lun != lun)
			continue;

		fc_fcp_pkt_hold(fsp);
		spin_unlock_irqrestore(lp->host->host_lock, flags);

		if (!fc_fcp_lock_pkt(fsp)) {
			fc_fcp_cleanup_cmd(fsp, error);
			fc_io_compl(fsp);
			fc_fcp_unlock_pkt(fsp);
		}

		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(lp->host->host_lock, flags);
		/*
		 * while we dropped the lock multiple pkts could
		 * have been released, so we have to start over.
		 */
		goto restart;
	}
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}
static void fc_fcp_abort_io(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
}
/**
 * fc_fcp_pkt_send() - send a fcp packet to the lower level.
 * @lp:		fc lport
 * @fsp:	fc packet
 *
 * This is called by upper layer protocol.
 * Return   : zero for success and -1 for failure
 * Context  : called from queuecommand which can be called from process
 *	      or scsi soft irq.
 * Locks    : called with the host lock and irqs disabled.
 */
static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	int rc;

	fsp->cmd->SCp.ptr = (char *)fsp;
	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

	int_to_scsilun(fsp->cmd->device->lun,
		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);

	spin_unlock_irq(lp->host->host_lock);
	rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
	spin_lock_irq(lp->host->host_lock);
	if (rc)
		list_del(&fsp->list);

	return rc;
}
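/*
 * Note that fc_fcp_pkt_send() drops the host lock around fcp_cmd_send():
 * frame allocation and the exchange send path take their own locks, and on
 * failure the packet is unlinked from scsi_pkt_queue again under the host
 * lock.
 */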
static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			   void (*resp)(struct fc_seq *,
					struct fc_frame *fp,
					void *arg))
{
	struct fc_frame *fp;
	struct fc_seq *seq;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	const size_t len = sizeof(fsp->cdb_cmd);
	int rc = 0;

	if (fc_fcp_lock_pkt(fsp))
		return 0;

	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
	if (!fp) {
		rc = -1;
		goto unlock;
	}

	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);

	rport = fsp->rport;
	fsp->max_payload = rport->maxframe_size;
	rp = rport->dd_data;

	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
	if (!seq) {
		fc_frame_free(fp);
		rc = -1;
		goto unlock;
	}
	fsp->last_pkt_time = jiffies;
	fsp->seq_ptr = seq;
	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */

	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
	fc_fcp_timer_set(fsp,
			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
	fc_fcp_unlock_pkt(fsp);
	return rc;
}
/*
 * transport error handler
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		return;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		goto unlock;
	default:
		FC_DBG("unknown error %ld\n", PTR_ERR(fp));
	}
	/*
	 * clear abort pending, because the lower layer
	 * decided to force completion.
	 */
	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_CMD_PLOGO;
	fc_fcp_complete_locked(fsp);
unlock:
	fc_fcp_unlock_pkt(fsp);
}
/*
 * Scsi abort handler - calls to send an abort
 * and then wait for abort completion
 */
static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
	int rc = FAILED;

	if (fc_fcp_send_abort(fsp))
		return FAILED;

	init_completion(&fsp->tm_done);
	fsp->wait_for_comp = 1;

	spin_unlock_bh(&fsp->scsi_pkt_lock);
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->wait_for_comp = 0;

	if (!rc) {
		FC_DBG("target abort cmd failed\n");
		rc = FAILED;
	} else if (fsp->state & FC_SRB_ABORTED) {
		FC_DBG("target abort cmd passed\n");
		rc = SUCCESS;
		fc_fcp_complete_locked(fsp);
	}

	return rc;
}
/*
 * Retry LUN reset after resource allocation failed.
 */
static void fc_lun_reset_send(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_lport *lp = fsp->lp;

	if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
			return;
		if (fc_fcp_lock_pkt(fsp))
			return;
		setup_timer(&fsp->timer, fc_lun_reset_send,
			    (unsigned long)fsp);
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		fc_fcp_unlock_pkt(fsp);
	}
}
/*
 * Scsi device reset handler - send a LUN RESET to the device
 * and wait for the reset reply
 */
static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			unsigned int id, unsigned int lun)
{
	int rc;

	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

	fsp->wait_for_comp = 1;
	init_completion(&fsp->tm_done);

	fc_lun_reset_send((unsigned long)fsp);

	/*
	 * wait for completion of reset
	 * after that make sure all commands are terminated
	 */
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->state |= FC_SRB_COMPL;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	del_timer_sync(&fsp->timer);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->wait_for_comp = 0;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	if (!rc) {
		FC_DBG("lun reset failed\n");
		return FAILED;
	}

	/* cdb_status holds the tmf's rsp code */
	if (fsp->cdb_status != FCP_TMF_CMPL)
		return FAILED;

	FC_DBG("lun reset to lun %u completed\n", lun);
	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
	return SUCCESS;
}
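/*
 * Note on fc_lun_reset(): FC_SRB_COMPL is set before del_timer_sync() so
 * that fc_fcp_timer_set() cannot re-arm the timer while we are tearing the
 * TMF down, which would otherwise let fc_lun_reset_send() keep
 * rescheduling itself.
 */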
/*
 * Task Management response handler
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/*
		 * If there is an error just let it timeout or wait
		 * for TMF to be aborted if it timed out.
		 *
		 * scsi-eh will escalate when either happens.
		 */
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		return;

	/*
	 * raced with eh timeout handler.
	 */
	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return;
	}

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_BLS)
		fc_fcp_resp(fsp, fp);
	fsp->seq_ptr = NULL;
	fsp->lp->tt.exch_done(seq);
	fc_frame_free(fp);
	fc_fcp_unlock_pkt(fsp);
}
static void fc_fcp_cleanup(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
}
/*
 * fc_fcp_timeout: called by OS timer function.
 *
 * The timer has been inactivated and must be reactivated if desired
 * using fc_fcp_timer_set().
 *
 * Algorithm:
 *
 * If REC is supported, just issue it, and return.  The REC exchange will
 * complete or time out, and recovery can continue at that point.
 *
 * Otherwise, if the response has been received without all the data,
 * it has been ER_TIMEOUT since the response was received.
 *
 * If the response has not been received,
 * we see if data was received recently.  If it has been, we continue waiting,
 * otherwise, we abort the command.
 */
static void fc_fcp_timeout(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_rport *rport = fsp->rport;
	struct fc_rport_libfc_priv *rp = rport->dd_data;

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (fsp->cdb_cmd.fc_tm_flags)
		goto unlock;

	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
		fc_fcp_rec(fsp);
	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
			       jiffies))
		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
	else if (fsp->state & FC_SRB_RCV_STATUS)
		fc_fcp_complete_locked(fsp);
	else
		fc_timeout_error(fsp);
	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
	fc_fcp_unlock_pkt(fsp);
}
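/*
 * Recovery ladder: when the target supports REC we query the exchange
 * status first (fc_fcp_rec), possibly follow up with SRR retransmission
 * requests (fc_fcp_srr), and only fall back to aborting the exchange via
 * fc_timeout_error() when those options are exhausted.
 */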
/*
 * Send a REC ELS request
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;
	struct fc_frame *fp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;

	lp = fsp->lp;
	rport = fsp->rport;
	rp = rport->dd_data;
	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
		fsp->status_code = FC_HRD_ERROR;
		fsp->io_status = 0;
		fc_fcp_complete_locked(fsp);
		return;
	}
	fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
	if (!fp)
		goto retry;

	fr_seq(fp) = fsp->seq_ptr;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
		return;
	}
retry:
	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
	else
		fc_timeout_error(fsp);
}
/*
 * Receive handler for REC ELS frame.
 * If it is a reject, then let the scsi layer handle the timeout.
 * If it is an LS_ACC, then if the I/O was not completed,
 * set the timeout and return; otherwise complete the exchange
 * and tell the scsi layer to restart the I/O.
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_els_rec_acc *recp;
	struct fc_els_ls_rjt *rjt;
	u32 e_stat;
	u8 opcode;
	u32 offset;
	enum dma_data_direction data_dir;
	enum fc_rctl r_ctl;
	struct fc_rport_libfc_priv *rp;

	if (IS_ERR(fp)) {
		fc_fcp_rec_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->recov_retry = 0;
	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		switch (rjt->er_reason) {
		default:
			FC_DEBUG_FCP("device %x unexpected REC reject "
				     "reason %d expl %d\n",
				     fsp->rport->port_id, rjt->er_reason,
				     rjt->er_explan);
			/* fall through */
		case ELS_RJT_UNSUP:
			FC_DEBUG_FCP("device does not support REC\n");
			rp = fsp->rport->dd_data;
			/*
			 * if we do not support RECs or got some bogus
			 * reason then re-arm the timer so we check for
			 * making progress.
			 */
			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
			break;
		case ELS_RJT_LOGIC:
		case ELS_RJT_UNAB:
			/*
			 * If no data transfer, the command frame got dropped
			 * so we just retry.  If data was transferred, we
			 * lost the response but the target has no record,
			 * so we abort and retry.
			 */
			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
			    fsp->xfer_len == 0) {
				fc_fcp_retry_cmd(fsp);
				break;
			}
			fc_timeout_error(fsp);
			break;
		}
	} else if (opcode == ELS_LS_ACC) {
		if (fsp->state & FC_SRB_ABORTED)
			goto unlock_out;

		data_dir = fsp->cmd->sc_data_direction;
		recp = fc_frame_payload_get(fp, sizeof(*recp));
		offset = ntohl(recp->reca_fc4value);
		e_stat = ntohl(recp->reca_e_stat);

		if (e_stat & ESB_ST_COMPLETE) {
			/*
			 * The exchange is complete.
			 *
			 * For output, we must've lost the response.
			 * For input, all data must've been sent.
			 * We may have lost the response
			 * (and a confirmation was requested) and maybe
			 * some data.
			 *
			 * If all data received, send SRR
			 * asking for response.	 If partial data received,
			 * or gaps, SRR requests data at start of gap.
			 * Recovery via SRR relies on in-order-delivery.
			 */
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end == offset) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else {
				offset = fsp->xfer_contig_end;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		} else if (e_stat & ESB_ST_SEQ_INIT) {
			/*
			 * The remote port has the initiative, so just
			 * keep waiting for it to complete.
			 */
			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		} else {
			/*
			 * The exchange is incomplete, we have seq. initiative.
			 * Lost response with requested confirmation,
			 * lost confirmation, lost transfer ready or
			 * lost write data.
			 *
			 * For output, if not all data was received, ask
			 * for transfer ready to be repeated.
			 *
			 * If we received or sent all the data, send SRR to
			 * request response.
			 *
			 * If we lost a response, we may have lost some read
			 * data as well.
			 */
			r_ctl = FC_RCTL_DD_SOL_DATA;
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				if (offset < fsp->data_len)
					r_ctl = FC_RCTL_DD_DATA_DESC;
			} else if (offset == fsp->xfer_contig_end) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end < offset) {
				offset = fsp->xfer_contig_end;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		}
	}
unlock_out:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
	fc_frame_free(fp);
}
/*
 * Handle error response or timeout for REC exchange.
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		break;

	default:
		FC_DBG("REC %p fid %x unexpected error %d\n",
		       fsp, fsp->rport->port_id, error);
		fsp->status_code = FC_CMD_PLOGO;
		/* fall through */

	case -FC_EX_TIMEOUT:
		/*
		 * Assume REC or LS_ACC was lost.
		 * The exchange manager will have aborted REC, so retry.
		 */
		FC_DBG("REC fid %x error %d retry %d/%d\n",
		       fsp->rport->port_id, error, fsp->recov_retry,
		       FC_MAX_RECOV_RETRY);
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
}
/*
 * Time out error routine:
 * aborts the I/O, closes the exchange and
 * sends completion notification to the scsi layer
 */
static void fc_timeout_error(struct fc_fcp_pkt *fsp)
{
	fsp->status_code = FC_CMD_TIME_OUT;
	fsp->cdb_status = 0;
	fsp->io_status = 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
	fc_fcp_send_abort(fsp);
}
/*
 * Sequence retransmission request.
 * This is called after receiving status but insufficient data, or
 * when expecting status but the request has timed out.
 */
static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
	struct fc_seq *seq;
	struct fcp_srr *srr;
	struct fc_frame *fp;
	u8 cdb_op;

	rport = fsp->rport;
	rp = rport->dd_data;
	cdb_op = fsp->cdb_cmd.fc_cdb[0];

	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
		goto retry;			/* shouldn't happen */
	fp = fc_frame_alloc(lp, sizeof(*srr));
	if (!fp)
		goto retry;

	srr = fc_frame_payload_get(fp, sizeof(*srr));
	memset(srr, 0, sizeof(*srr));
	srr->srr_op = ELS_SRR;
	srr->srr_ox_id = htons(ep->oxid);
	srr->srr_rx_id = htons(ep->rxid);
	srr->srr_r_ctl = r_ctl;
	srr->srr_rel_off = htonl(offset);

	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
				   fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
	if (!seq) {
		fc_frame_free(fp);
		goto retry;
	}
	fsp->recov_seq = seq;
	fsp->xfer_len = offset;
	fsp->xfer_contig_end = offset;
	fsp->state &= ~FC_SRB_RCV_STATUS;
	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
	return;
retry:
	fc_fcp_retry_cmd(fsp);
}
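/*
 * After a successful SRR send, xfer_len and xfer_contig_end are rewound
 * above to the requested retransmission offset and FC_SRB_RCV_STATUS is
 * cleared, so the redelivered data and status are accounted for exactly
 * once.
 */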
/*
 * Handle response from SRR.
 */
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		fc_fcp_srr_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fh = fc_frame_header_get(fp);
	/*
	 * BUG? fc_fcp_srr_error calls exch_done which would release
	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
	 * then fc_exch_timeout would be sending an abort. The exch_done
	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
	 * an abort response though.
	 */
	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_unlock_pkt(fsp);
		return;
	}

	fsp->recov_seq = NULL;
	switch (fc_frame_payload_op(fp)) {
	case ELS_LS_ACC:
		fsp->recov_retry = 0;
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		break;
	case ELS_LS_RJT:
	default:
		fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
	fsp->lp->tt.exch_done(seq);
out:
	fc_frame_free(fp);
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->lp->tt.exch_done(fsp->recov_seq);
	fsp->recov_seq = NULL;
	switch (PTR_ERR(fp)) {
	case -FC_EX_TIMEOUT:
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	case -FC_EX_CLOSED:			/* e.g., link failure */
		/* fall through */
	default:
		fc_fcp_retry_cmd(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}
/**
 * fc_queuecommand - The queuecommand function of the scsi template
 * @sc_cmd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when sc_cmd is completed
 *
 * this is the i/o strategy routine, called by the scsi layer
 * this routine is called with holding the host_lock.
 */
int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_fcp_pkt *fsp;
	struct fc_rport_libfc_priv *rp;
	int rval;
	int rc = 0;
	struct fcoe_dev_stats *stats;

	lp = shost_priv(sc_cmd->device->host);

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		done(sc_cmd);
		goto out;
	}

	if (!*(struct fc_remote_port **)rport->dd_data) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
		sc_cmd->result = DID_IMM_RETRY << 16;
		done(sc_cmd);
		goto out;
	}

	rp = rport->dd_data;

	if (!fc_fcp_lport_queue_ready(lp)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
	if (fsp == NULL) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * build the libfc request pkt
	 */
	fsp->cmd = sc_cmd;	/* save the cmd */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */
	sc_cmd->scsi_done = done;

	/*
	 * set up the transfer length
	 */
	fsp->data_len = scsi_bufflen(sc_cmd);

	/*
	 * setup the data direction
	 */
	stats = lp->dev_stats[smp_processor_id()];
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		fsp->req_flags = FC_SRB_READ;
		stats->InputRequests++;
		stats->InputMegabytes = fsp->data_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		fsp->req_flags = FC_SRB_WRITE;
		stats->OutputRequests++;
		stats->OutputMegabytes = fsp->data_len;
	} else {
		fsp->req_flags = 0;
		stats->ControlRequests++;
	}

	fsp->tgt_flags = rp->flags;

	init_timer(&fsp->timer);
	fsp->timer.data = (unsigned long)fsp;

	/*
	 * send it to the lower layer
	 * if we get -1 return then put the request in the pending
	 * queue.
	 */
	rval = fc_fcp_pkt_send(lp, fsp);
	if (rval != 0) {
		fsp->state = FC_SRB_FREE;
		fc_fcp_pkt_release(fsp);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
out:
	return rc;
}
EXPORT_SYMBOL(fc_queuecommand);
/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp:	scsi packet
 *
 * Translates a transport error to a Linux SCSI error.
 *
 * The fcp packet lock must be held when calling.
 */
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si;
	struct scsi_cmnd *sc_cmd;
	struct fc_lport *lp;
	unsigned long flags;

	/* release outstanding ddp context */
	fc_fcp_ddp_done(fsp);

	fsp->state |= FC_SRB_COMPL;
	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		del_timer_sync(&fsp->timer);
		spin_lock_bh(&fsp->scsi_pkt_lock);
	}

	lp = fsp->lp;
	si = fc_get_scsi_internal(lp);
	spin_lock_irqsave(lp->host->host_lock, flags);
	if (!fsp->cmd) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	/*
	 * if a command timed out while we had to try and throttle IO
	 * and it is now getting cleaned up, then we are about to
	 * try again so clear the throttled flag in case we get more
	 * time outs.
	 */
	if (si->throttled && fsp->state & FC_SRB_NOMEM)
		si->throttled = 0;

	sc_cmd = fsp->cmd;
	fsp->cmd = NULL;

	if (!sc_cmd->SCp.ptr) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
	switch (fsp->status_code) {
	case FC_COMPLETE:
		if (fsp->cdb_status == 0) {
			/*
			 * good I/O status
			 */
			sc_cmd->result = DID_OK << 16;
			if (fsp->scsi_resid)
				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
		} else if (fsp->cdb_status == QUEUE_FULL) {
			struct scsi_device *tmp_sdev;
			struct scsi_device *sdev = sc_cmd->device;

			shost_for_each_device(tmp_sdev, sdev->host) {
				if (tmp_sdev->id != sdev->id)
					continue;

				if (tmp_sdev->queue_depth > 1) {
					scsi_track_queue_full(tmp_sdev,
							      tmp_sdev->
							      queue_depth - 1);
				}
			}
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		} else {
			/*
			 * transport level I/O was ok but scsi
			 * has non zero status
			 */
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		}
		break;
	case FC_ERROR:
		sc_cmd->result = DID_ERROR << 16;
		break;
	case FC_DATA_UNDRUN:
		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
			/*
			 * scsi status is good but transport level
			 * underrun.
			 */
			sc_cmd->result = DID_OK << 16;
		} else {
			/*
			 * scsi got underrun, this is an error
			 */
			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		}
		break;
	case FC_DATA_OVRRUN:
		/*
		 * overrun is an error
		 */
		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		break;
	case FC_CMD_ABORTED:
		sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
		break;
	case FC_CMD_TIME_OUT:
		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	case FC_HRD_ERROR:
		sc_cmd->result = (DID_NO_CONNECT << 16);
		break;
	default:
		sc_cmd->result = (DID_ERROR << 16);
		break;
	}

	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	/* release ref from initial allocation in queue command */
	fc_fcp_pkt_release(fsp);
}
/**
 * fc_fcp_complete() - complete processing of a fcp packet
 * @fsp:	fcp packet
 *
 * This function may sleep if a fsp timer is pending.
 * The host lock must not be held by the caller.
 */
void fc_fcp_complete(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))
		return;

	fc_fcp_complete_locked(fsp);
	fc_fcp_unlock_pkt(fsp);
}
EXPORT_SYMBOL(fc_fcp_complete);
/**
 * fc_eh_abort() - Abort a command
 * @sc_cmd:	scsi command to abort
 *
 * From scsi host template.
 * send ABTS to the target device and wait for the response
 * sc_cmd is the pointer to the command to be aborted.
 */
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_fcp_pkt *fsp;
	struct fc_lport *lp;
	int rc = FAILED;
	unsigned long flags;

	lp = shost_priv(sc_cmd->device->host);
	if (lp->state != LPORT_ST_READY)
		return rc;
	else if (!lp->link_up)
		return rc;

	spin_lock_irqsave(lp->host->host_lock, flags);
	fsp = CMD_SP(sc_cmd);
	if (!fsp) {
		/* command completed while scsi eh was setting up */
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return SUCCESS;
	}
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
	fc_fcp_pkt_hold(fsp);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	if (fc_fcp_lock_pkt(fsp)) {
		/* completed while we were waiting for timer to be deleted */
		rc = SUCCESS;
		goto release_pkt;
	}

	rc = fc_fcp_pkt_abort(lp, fsp);
	fc_fcp_unlock_pkt(fsp);

release_pkt:
	fc_fcp_pkt_release(fsp);
	return rc;
}
EXPORT_SYMBOL(fc_eh_abort);
/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd:	scsi command
 *
 * Set from scsi host template to send tm cmd to the target and wait for the
 * response.
 */
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lp;
	struct fc_fcp_pkt *fsp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	int rc = FAILED;
	struct fc_rport_libfc_priv *rp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval)
		goto out;

	rp = rport->dd_data;
	lp = shost_priv(sc_cmd->device->host);

	if (lp->state != LPORT_ST_READY)
		return rc;

	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
	if (fsp == NULL) {
		FC_DBG("could not allocate scsi_pkt\n");
		sc_cmd->result = DID_NO_CONNECT << 16;
		goto out;
	}

	/*
	 * Build the libfc request pkt. Do not set the scsi cmnd, because
	 * the sc passed in is not setup for execution like when sent
	 * through the queuecommand callout.
	 */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */

	/*
	 * flush outstanding commands
	 */
	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
	fsp->state = FC_SRB_FREE;
	fc_fcp_pkt_release(fsp);

out:
	return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);
/**
 * fc_eh_host_reset() - The reset function will reset the ports on the host.
 * @sc_cmd:	scsi command
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lp = shost_priv(shost);
	unsigned long wait_tmo;

	lp->tt.lport_reset(lp);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lp)) {
		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "Host reset failed. "
			     "lport not ready.\n");
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);
/**
 * fc_slave_alloc() - configure queue depth
 * @sdev:	scsi device
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	int queue_depth;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported) {
		if (sdev->host->hostt->cmd_per_lun)
			queue_depth = sdev->host->hostt->cmd_per_lun;
		else
			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
		scsi_activate_tcq(sdev, queue_depth);
	}
	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);
int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);
void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "Leaked scsi packets.\n");

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);
int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
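/*
 * The mempool is created with a reserve of two packets per lport so that
 * at least some fc_fcp_pkt allocations can make progress even when the
 * slab allocator is under memory pressure.
 */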
static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		FC_DBG("Unable to allocate SRB cache...module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;
destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}
static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);