[SCSI] libfc: use lso_max for sequence offload
drivers/scsi/libfc/fc_fcp.c
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22 #include <linux/module.h>
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/types.h>
26 #include <linux/spinlock.h>
27 #include <linux/scatterlist.h>
28 #include <linux/err.h>
29 #include <linux/crc32.h>
30
31 #include <scsi/scsi_tcq.h>
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_cmnd.h>
36
37 #include <scsi/fc/fc_fc2.h>
38
39 #include <scsi/libfc.h>
40 #include <scsi/fc_encode.h>
41
42 MODULE_AUTHOR("Open-FCoE.org");
43 MODULE_DESCRIPTION("libfc");
44 MODULE_LICENSE("GPL");
45
46 static int fc_fcp_debug;
47
48 #define FC_DEBUG_FCP(fmt...) \
49 do { \
50 if (fc_fcp_debug) \
51 FC_DBG(fmt); \
52 } while (0)
53
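/*
 * Illustrative note (not from the original source): fc_fcp_debug is a
 * plain static flag, so with it set non-zero (say from a debugger, or a
 * hypothetical patched-in module parameter), a call such as the one used
 * later in this file:
 *
 *	FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
 *		     seq_blen, offset);
 *
 * is forwarded to FC_DBG(); with the flag clear it costs only the test.
 */
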
54 static struct kmem_cache *scsi_pkt_cachep;
55
56 /* SRB state definitions */
57 #define FC_SRB_FREE 0 /* cmd is free */
58 #define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
59 #define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
60 #define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
61 #define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
62 #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
63 #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
64 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
65 #define FC_SRB_NOMEM (1 << 7) /* dropped due to out of mem */
66
67 #define FC_SRB_READ (1 << 1)
68 #define FC_SRB_WRITE (1 << 0)
69
70 /*
71 * The SCp.ptr should be tested and set under the host lock. NULL indicates
72 * that the command has been returned to the scsi layer.
73 */
74 #define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
75 #define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
76 #define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
77 #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
78 #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
79
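/*
 * Usage sketch (illustrative only): these aliases overlay libfc state on
 * the midlayer's struct scsi_pointer. fc_fcp_pkt_send() stores the packet
 * with
 *
 *	fsp->cmd->SCp.ptr = (char *)fsp;
 *
 * and the eh/completion paths fetch it back with
 *
 *	struct fc_fcp_pkt *fsp = CMD_SP(sc_cmd);
 *
 * both under the host lock, per the comment above.
 */
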
80 struct fc_fcp_internal {
81 mempool_t *scsi_pkt_pool;
82 struct list_head scsi_pkt_queue;
83 u8 throttled;
84 };
85
86 #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
87
88 /*
89 * function prototypes
90 * FC scsi I/O related functions
91 */
92 static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
93 static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
94 static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
95 static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
96 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
97 static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
98 static void fc_timeout_error(struct fc_fcp_pkt *);
99 static void fc_fcp_timeout(unsigned long data);
100 static void fc_fcp_rec(struct fc_fcp_pkt *);
101 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
102 static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
103 static void fc_io_compl(struct fc_fcp_pkt *);
104
105 static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
106 static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
107 static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
108
109 /*
110 * command status codes
111 */
112 #define FC_COMPLETE 0
113 #define FC_CMD_ABORTED 1
114 #define FC_CMD_RESET 2
115 #define FC_CMD_PLOGO 3
116 #define FC_SNS_RCV 4
117 #define FC_TRANS_ERR 5
118 #define FC_DATA_OVRRUN 6
119 #define FC_DATA_UNDRUN 7
120 #define FC_ERROR 8
121 #define FC_HRD_ERROR 9
122 #define FC_CMD_TIME_OUT 10
123
124 /*
125 * Error recovery timeout values.
126 */
127 #define FC_SCSI_ER_TIMEOUT (10 * HZ)
128 #define FC_SCSI_TM_TOV (10 * HZ)
129 #define FC_SCSI_REC_TOV (2 * HZ)
130 #define FC_HOST_RESET_TIMEOUT (30 * HZ)
131
132 #define FC_MAX_ERROR_CNT 5
133 #define FC_MAX_RECOV_RETRY 3
134
135 #define FC_FCP_DFLT_QUEUE_DEPTH 32
136
137 /**
138 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
139 * @lp: fc lport struct
140 * @gfp: gfp flags for allocation
141 *
142 * This is used by the upper layer scsi driver.
143 * Return Value : scsi_pkt structure or NULL on allocation failure.
144 * Context : called from process context. No locking required.
145 */
146 static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
147 {
148 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
149 struct fc_fcp_pkt *fsp;
150
151 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
152 if (fsp) {
153 memset(fsp, 0, sizeof(*fsp));
154 fsp->lp = lp;
155 atomic_set(&fsp->ref_cnt, 1);
156 init_timer(&fsp->timer);
157 INIT_LIST_HEAD(&fsp->list);
158 spin_lock_init(&fsp->scsi_pkt_lock);
159 }
160 return fsp;
161 }
162
163 /**
164 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
165 * @fsp: fcp packet struct
166 *
167 * This is used by the upper layer scsi driver.
168 * Context : called from process and interrupt context.
169 * No locking required.
170 */
171 static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
172 {
173 if (atomic_dec_and_test(&fsp->ref_cnt)) {
174 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
175
176 mempool_free(fsp, si->scsi_pkt_pool);
177 }
178 }
179
180 static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
181 {
182 atomic_inc(&fsp->ref_cnt);
183 }
184
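/*
 * Reference counting sketch (illustrative, mirrors how this file uses the
 * helpers): allocation returns with ref_cnt == 1; every extra user takes a
 * hold and drops it when done, and the packet is only freed back to the
 * mempool on the final release.
 *
 *	struct fc_fcp_pkt *fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
 *
 *	if (fsp) {
 *		fc_fcp_pkt_hold(fsp);	 // e.g. hold for an outstanding REC
 *		...
 *		fc_fcp_pkt_release(fsp); // drop the extra hold
 *		fc_fcp_pkt_release(fsp); // final ref: mempool_free()
 *	}
 */
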
185 /**
186 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
187 * @seq: exchange sequence
188 * @fsp: fcp packet struct
189 *
190 * Release the hold on the scsi_pkt packet, which was taken to keep the
191 * scsi_pkt around until the EM layer exch resource is freed.
192 * Context : called from the EM layer.
193 * no locking required
194 */
195 static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
196 {
197 fc_fcp_pkt_release(fsp);
198 }
199
200 /**
201 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
202 * @fsp: fcp packet
203 *
204 * We should only return error if we return a command to scsi-ml before
205 * getting a response. This can happen in cases where we send an abort, but
206 * do not wait for the response, and the abort and the command can be
207 * passing each other on the wire/network-layer.
208 *
209 * Note: this function locks the packet and gets a reference to allow
210 * callers to call the completion function while the lock is held and
211 * not have to worry about the packet's refcount.
212 *
213 * TODO: Maybe we should just have callers grab/release the lock and
214 * have a function that they call to verify the fsp and grab a ref if
215 * needed.
216 */
217 static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
218 {
219 spin_lock_bh(&fsp->scsi_pkt_lock);
220 if (fsp->state & FC_SRB_COMPL) {
221 spin_unlock_bh(&fsp->scsi_pkt_lock);
222 return -EPERM;
223 }
224
225 fc_fcp_pkt_hold(fsp);
226 return 0;
227 }
228
229 static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
230 {
231 spin_unlock_bh(&fsp->scsi_pkt_lock);
232 fc_fcp_pkt_release(fsp);
233 }
234
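/*
 * Locking pattern sketch (illustrative): callers pair fc_fcp_lock_pkt()
 * with fc_fcp_unlock_pkt(), bailing out if the command already completed,
 * exactly as the response handlers below do:
 *
 *	if (fc_fcp_lock_pkt(fsp))
 *		return;		// already FC_SRB_COMPL, no ref taken
 *	...			// work on fsp, may complete it
 *	fc_fcp_unlock_pkt(fsp);	// unlock and drop the ref from lock_pkt
 */
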
235 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
236 {
237 if (!(fsp->state & FC_SRB_COMPL))
238 mod_timer(&fsp->timer, jiffies + delay);
239 }
240
241 static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
242 {
243 if (!fsp->seq_ptr)
244 return -EINVAL;
245
246 fsp->state |= FC_SRB_ABORT_PENDING;
247 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
248 }
249
250 /*
251 * Retry command.
252 * An abort isn't needed.
253 */
254 static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
255 {
256 if (fsp->seq_ptr) {
257 fsp->lp->tt.exch_done(fsp->seq_ptr);
258 fsp->seq_ptr = NULL;
259 }
260
261 fsp->state &= ~FC_SRB_ABORT_PENDING;
262 fsp->io_status = 0;
263 fsp->status_code = FC_ERROR;
264 fc_fcp_complete_locked(fsp);
265 }
266
267 /*
268 * Receive SCSI data from target.
269 * Called after receiving solicited data.
270 */
271 static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
272 {
273 struct scsi_cmnd *sc = fsp->cmd;
274 struct fc_lport *lp = fsp->lp;
275 struct fcoe_dev_stats *stats;
276 struct fc_frame_header *fh;
277 size_t start_offset;
278 size_t offset;
279 u32 crc;
280 u32 copy_len = 0;
281 size_t len;
282 void *buf;
283 struct scatterlist *sg;
284 size_t remaining;
285
286 fh = fc_frame_header_get(fp);
287 offset = ntohl(fh->fh_parm_offset);
288 start_offset = offset;
289 len = fr_len(fp) - sizeof(*fh);
290 buf = fc_frame_payload_get(fp, 0);
291
292 if (offset + len > fsp->data_len) {
293 /* this should never happen */
294 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
295 fc_frame_crc_check(fp))
296 goto crc_err;
297 FC_DEBUG_FCP("data received past end. len %zx offset %zx "
298 "data_len %x\n", len, offset, fsp->data_len);
299 fc_fcp_retry_cmd(fsp);
300 return;
301 }
302 if (offset != fsp->xfer_len)
303 fsp->state |= FC_SRB_DISCONTIG;
304
305 crc = 0;
306 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
307 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
308
309 sg = scsi_sglist(sc);
310 remaining = len;
311
312 while (remaining > 0 && sg) {
313 size_t off;
314 void *page_addr;
315 size_t sg_bytes;
316
317 if (offset >= sg->length) {
318 offset -= sg->length;
319 sg = sg_next(sg);
320 continue;
321 }
322 sg_bytes = min(remaining, sg->length - offset);
323
324 /*
325 * The scatterlist item may be bigger than PAGE_SIZE,
326 * but we are limited to mapping PAGE_SIZE at a time.
327 */
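/*
 * Worked example with hypothetical values: with PAGE_SIZE 0x1000 and
 * off 0x1804, (off & ~PAGE_MASK) == 0x804, so sg_bytes is clamped to
 * at most 0x7fc bytes before the next kmap_atomic() of the page at
 * (off >> PAGE_SHIFT).
 */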
328 off = offset + sg->offset;
329 sg_bytes = min(sg_bytes, (size_t)
330 (PAGE_SIZE - (off & ~PAGE_MASK)));
331 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
332 KM_SOFTIRQ0);
333 if (!page_addr)
334 break; /* XXX panic? */
335
336 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
337 crc = crc32(crc, buf, sg_bytes);
338 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
339 sg_bytes);
340
341 kunmap_atomic(page_addr, KM_SOFTIRQ0);
342 buf += sg_bytes;
343 offset += sg_bytes;
344 remaining -= sg_bytes;
345 copy_len += sg_bytes;
346 }
347
348 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
349 buf = fc_frame_payload_get(fp, 0);
350 if (len % 4) {
351 crc = crc32(crc, buf + len, 4 - (len % 4));
352 len += 4 - (len % 4);
353 }
354
355 if (~crc != le32_to_cpu(fr_crc(fp))) {
356 crc_err:
357 stats = lp->dev_stats[smp_processor_id()];
358 stats->ErrorFrames++;
359 if (stats->InvalidCRCCount++ < 5)
360 FC_DBG("CRC error on data frame\n");
361 /*
362 * Assume the frame is total garbage.
363 * We may have copied it over the good part
364 * of the buffer.
365 * If so, we need to retry the entire operation.
366 * Otherwise, ignore it.
367 */
368 if (fsp->state & FC_SRB_DISCONTIG)
369 fc_fcp_retry_cmd(fsp);
370 return;
371 }
372 }
373
374 if (fsp->xfer_contig_end == start_offset)
375 fsp->xfer_contig_end += copy_len;
376 fsp->xfer_len += copy_len;
377
378 /*
379 * In the very rare event that this data arrived after the response
380 * and completes the transfer, call the completion handler.
381 */
382 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
383 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
384 fc_fcp_complete_locked(fsp);
385 }
386
387 /**
388 * fc_fcp_send_data() - Send SCSI data to target.
389 * @fsp: ptr to fc_fcp_pkt
390 * @sp: ptr to this sequence
391 * @offset: starting offset for this data request
392 * @seq_blen: the burst length for this data request
393 *
394 * Called after receiving a Transfer Ready data descriptor.
395 * If the LLD is capable of seq offload then send down seq_blen
396 * bytes of data in a single frame, otherwise send multiple FC
397 * frames of the max FC frame payload supported by the target port.
398 *
399 * Returns : 0 for success.
400 */
401 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
402 size_t offset, size_t seq_blen)
403 {
404 struct fc_exch *ep;
405 struct scsi_cmnd *sc;
406 struct scatterlist *sg;
407 struct fc_frame *fp = NULL;
408 struct fc_lport *lp = fsp->lp;
409 size_t remaining;
410 size_t t_blen;
411 size_t tlen;
412 size_t sg_bytes;
413 size_t frame_offset, fh_parm_offset;
414 int error;
415 void *data = NULL;
416 void *page_addr;
417 int using_sg = lp->sg_supp;
418 u32 f_ctl;
419
420 WARN_ON(seq_blen <= 0);
421 if (unlikely(offset + seq_blen > fsp->data_len)) {
422 /* this should never happen */
423 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
424 seq_blen, offset);
425 fc_fcp_send_abort(fsp);
426 return 0;
427 } else if (offset != fsp->xfer_len) {
428 /* Out of Order Data Request - no problem, but unexpected. */
429 FC_DEBUG_FCP("xfer-ready non-contiguous. "
430 "seq_blen %zx offset %zx\n", seq_blen, offset);
431 }
432
433 /*
434 * if LLD is capable of seq_offload then set transport
435 * burst length (t_blen) to seq_blen, otherwise set t_blen
436 * to max FC frame payload previously set in fsp->max_payload.
437 */
438 t_blen = fsp->max_payload;
439 if (lp->seq_offload) {
440 t_blen = min(seq_blen, (size_t)lp->lso_max);
441 FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
442 fsp, seq_blen, lp->lso_max, t_blen);
443 }
444
445 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
446 if (t_blen > 512)
447 t_blen &= ~(512 - 1); /* round down to block size */
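/* e.g. (hypothetical values) t_blen 2260 (0x8d4) becomes 2048 (0x800) */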
448 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
449 sc = fsp->cmd;
450
451 remaining = seq_blen;
452 fh_parm_offset = frame_offset = offset;
453 tlen = 0;
454 seq = lp->tt.seq_start_next(seq);
455 f_ctl = FC_FC_REL_OFF;
456 WARN_ON(!seq);
457
458 /*
459 * If a get_page()/put_page() will fail, don't use sg lists
460 * in the fc_frame structure.
461 *
462 * The put_page() may be long after the I/O has completed
463 * in the case of FCoE, since the network driver does it
464 * via free_skb(). See the test in free_pages_check().
465 *
466 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
467 */
468 if (using_sg) {
469 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
470 if (page_count(sg_page(sg)) == 0 ||
471 (sg_page(sg)->flags & (1 << PG_lru |
472 1 << PG_private |
473 1 << PG_locked |
474 1 << PG_active |
475 1 << PG_slab |
476 1 << PG_swapcache |
477 1 << PG_writeback |
478 1 << PG_reserved |
479 1 << PG_buddy))) {
480 using_sg = 0;
481 break;
482 }
483 }
484 }
485 sg = scsi_sglist(sc);
486
487 while (remaining > 0 && sg) {
488 if (offset >= sg->length) {
489 offset -= sg->length;
490 sg = sg_next(sg);
491 continue;
492 }
493 if (!fp) {
494 tlen = min(t_blen, remaining);
495
496 /*
497 * TODO. Temporary workaround. fc_seq_send() can't
498 * handle odd lengths in non-linear skbs.
499 * This will be the final fragment only.
500 */
501 if (tlen % 4)
502 using_sg = 0;
503 if (using_sg) {
504 fp = _fc_frame_alloc(lp, 0);
505 if (!fp)
506 return -ENOMEM;
507 } else {
508 fp = fc_frame_alloc(lp, tlen);
509 if (!fp)
510 return -ENOMEM;
511
512 data = (void *)(fr_hdr(fp)) +
513 sizeof(struct fc_frame_header);
514 }
515 fh_parm_offset = frame_offset;
516 fr_max_payload(fp) = fsp->max_payload;
517 }
518 sg_bytes = min(tlen, sg->length - offset);
519 if (using_sg) {
520 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
521 FC_FRAME_SG_LEN);
522 get_page(sg_page(sg));
523 skb_fill_page_desc(fp_skb(fp),
524 skb_shinfo(fp_skb(fp))->nr_frags,
525 sg_page(sg), sg->offset + offset,
526 sg_bytes);
527 fp_skb(fp)->data_len += sg_bytes;
528 fr_len(fp) += sg_bytes;
529 fp_skb(fp)->truesize += PAGE_SIZE;
530 } else {
531 size_t off = offset + sg->offset;
532
533 /*
534 * The scatterlist item may be bigger than PAGE_SIZE,
535 * but we must not cross pages inside the kmap.
536 */
537 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
538 (off & ~PAGE_MASK)));
539 page_addr = kmap_atomic(sg_page(sg) +
540 (off >> PAGE_SHIFT),
541 KM_SOFTIRQ0);
542 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
543 sg_bytes);
544 kunmap_atomic(page_addr, KM_SOFTIRQ0);
545 data += sg_bytes;
546 }
547 offset += sg_bytes;
548 frame_offset += sg_bytes;
549 tlen -= sg_bytes;
550 remaining -= sg_bytes;
551
552 if (tlen)
553 continue;
554
555 /*
556 * Send sequence with transfer sequence initiative in case
557 * this is the last FCP frame of the sequence.
558 */
559 if (remaining == 0)
560 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
561
562 ep = fc_seq_exch(seq);
563 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
564 FC_TYPE_FCP, f_ctl, fh_parm_offset);
565
566 /*
567 * Send this fragment as part of the sequence.
568 */
569 error = lp->tt.seq_send(lp, seq, fp);
570 if (error) {
571 WARN_ON(1); /* send error should be rare */
572 fc_fcp_retry_cmd(fsp);
573 return 0;
574 }
575 fp = NULL;
576 }
577 fsp->xfer_len += seq_blen; /* premature count? */
578 return 0;
579 }
580
581 static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
582 {
583 int ba_done = 1;
584 struct fc_ba_rjt *brp;
585 struct fc_frame_header *fh;
586
587 fh = fc_frame_header_get(fp);
588 switch (fh->fh_r_ctl) {
589 case FC_RCTL_BA_ACC:
590 break;
591 case FC_RCTL_BA_RJT:
592 brp = fc_frame_payload_get(fp, sizeof(*brp));
593 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
594 break;
595 /* fall thru */
596 default:
597 /*
598 * We will let the command time out
599 * and scsi-ml recover in this case,
600 * therefore clear the ba_done flag.
601 */
602 ba_done = 0;
603 }
604
605 if (ba_done) {
606 fsp->state |= FC_SRB_ABORTED;
607 fsp->state &= ~FC_SRB_ABORT_PENDING;
608
609 if (fsp->wait_for_comp)
610 complete(&fsp->tm_done);
611 else
612 fc_fcp_complete_locked(fsp);
613 }
614 }
615
616 /**
617 * fc_fcp_reduce_can_queue() - drop can_queue
618 * @lp: lport to drop queueing for
619 *
620 * If we are getting memory allocation failures, then we may
621 * be trying to execute too many commands. We let the running
622 * commands complete or time out, then try again with a reduced
623 * can_queue. Eventually we will hit the point where we run
624 * entirely on reserved structs.
625 */
626 static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
627 {
628 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
629 unsigned long flags;
630 int can_queue;
631
632 spin_lock_irqsave(lp->host->host_lock, flags);
633 if (si->throttled)
634 goto done;
635 si->throttled = 1;
636
637 can_queue = lp->host->can_queue;
638 can_queue >>= 1;
639 if (!can_queue)
640 can_queue = 1;
641 lp->host->can_queue = can_queue;
642 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
643 "Reducing can_queue to %d.\n", can_queue);
644 done:
645 spin_unlock_irqrestore(lp->host->host_lock, flags);
646 }
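/*
 * Illustrative progression (hypothetical numbers): starting from
 * can_queue == 256, successive allocation failures throttle it to 128,
 * 64, 32, ... with a floor of 1. fc_io_compl() clears si->throttled
 * again once a command that hit FC_SRB_NOMEM finishes cleaning up.
 */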
647
648 /**
649 * fc_fcp_recv() - Receive FCP frames
650 * @seq: The sequence the frame is on
651 * @fp: The FC frame
652 * @arg: The related FCP packet
653 *
654 * Return : None
655 * Context : called from Soft IRQ context
656 * can not be called holding the list lock
657 */
658 static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
659 {
660 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
661 struct fc_lport *lp;
662 struct fc_frame_header *fh;
663 struct fcp_txrdy *dd;
664 u8 r_ctl;
665 int rc = 0;
666
667 if (IS_ERR(fp))
668 goto errout;
669
670 fh = fc_frame_header_get(fp);
671 r_ctl = fh->fh_r_ctl;
672 lp = fsp->lp;
673
674 if (!(lp->state & LPORT_ST_READY))
675 goto out;
676 if (fc_fcp_lock_pkt(fsp))
677 goto out;
678 fsp->last_pkt_time = jiffies;
679
680 if (fh->fh_type == FC_TYPE_BLS) {
681 fc_fcp_abts_resp(fsp, fp);
682 goto unlock;
683 }
684
685 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
686 goto unlock;
687
688 if (r_ctl == FC_RCTL_DD_DATA_DESC) {
689 /*
690 * received XFER RDY from the target
691 * need to send data to the target
692 */
693 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
694 dd = fc_frame_payload_get(fp, sizeof(*dd));
695 WARN_ON(!dd);
696
697 rc = fc_fcp_send_data(fsp, seq,
698 (size_t) ntohl(dd->ft_data_ro),
699 (size_t) ntohl(dd->ft_burst_len));
700 if (!rc)
701 seq->rec_data = fsp->xfer_len;
702 else if (rc == -ENOMEM)
703 fsp->state |= FC_SRB_NOMEM;
704 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
705 /*
706 * received a DATA frame
707 * next we will copy the data to the system buffer
708 */
709 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
710 fc_fcp_recv_data(fsp, fp);
711 seq->rec_data = fsp->xfer_contig_end;
712 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
713 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
714
715 fc_fcp_resp(fsp, fp);
716 } else {
717 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
718 }
719 unlock:
720 fc_fcp_unlock_pkt(fsp);
721 out:
722 fc_frame_free(fp);
723 errout:
724 if (IS_ERR(fp))
725 fc_fcp_error(fsp, fp);
726 else if (rc == -ENOMEM)
727 fc_fcp_reduce_can_queue(lp);
728 }
729
730 static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
731 {
732 struct fc_frame_header *fh;
733 struct fcp_resp *fc_rp;
734 struct fcp_resp_ext *rp_ex;
735 struct fcp_resp_rsp_info *fc_rp_info;
736 u32 plen;
737 u32 expected_len;
738 u32 respl = 0;
739 u32 snsl = 0;
740 u8 flags = 0;
741
742 plen = fr_len(fp);
743 fh = (struct fc_frame_header *)fr_hdr(fp);
744 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
745 goto len_err;
746 plen -= sizeof(*fh);
747 fc_rp = (struct fcp_resp *)(fh + 1);
748 fsp->cdb_status = fc_rp->fr_status;
749 flags = fc_rp->fr_flags;
750 fsp->scsi_comp_flags = flags;
751 expected_len = fsp->data_len;
752
753 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
754 rp_ex = (void *)(fc_rp + 1);
755 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
756 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
757 goto len_err;
758 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
759 if (flags & FCP_RSP_LEN_VAL) {
760 respl = ntohl(rp_ex->fr_rsp_len);
761 if (respl != sizeof(*fc_rp_info))
762 goto len_err;
763 if (fsp->wait_for_comp) {
764 /* Abuse cdb_status for rsp code */
765 fsp->cdb_status = fc_rp_info->rsp_code;
766 complete(&fsp->tm_done);
767 /*
768 * tmfs will not have any scsi cmd so
769 * exit here
770 */
771 return;
772 } else
773 goto err;
774 }
775 if (flags & FCP_SNS_LEN_VAL) {
776 snsl = ntohl(rp_ex->fr_sns_len);
777 if (snsl > SCSI_SENSE_BUFFERSIZE)
778 snsl = SCSI_SENSE_BUFFERSIZE;
779 memcpy(fsp->cmd->sense_buffer,
780 (char *)fc_rp_info + respl, snsl);
781 }
782 }
783 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
784 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
785 goto len_err;
786 if (flags & FCP_RESID_UNDER) {
787 fsp->scsi_resid = ntohl(rp_ex->fr_resid);
788 /*
789 * The cmnd->underflow is the minimum number of
790 * bytes that must be transferred for this
791 * command. Provided a sense condition is not
792 * present, make sure the actual amount
793 * transferred is at least the underflow value
794 * or fail.
795 */
796 if (!(flags & FCP_SNS_LEN_VAL) &&
797 (fc_rp->fr_status == 0) &&
798 (scsi_bufflen(fsp->cmd) -
799 fsp->scsi_resid) < fsp->cmd->underflow)
800 goto err;
801 expected_len -= fsp->scsi_resid;
802 } else {
803 fsp->status_code = FC_ERROR;
804 }
805 }
806 }
807 fsp->state |= FC_SRB_RCV_STATUS;
808
809 /*
810 * Check for missing or extra data frames.
811 */
812 if (unlikely(fsp->xfer_len != expected_len)) {
813 if (fsp->xfer_len < expected_len) {
814 /*
815 * Some data may be queued locally,
816 * Wait at least one jiffy to see if it is delivered.
817 * If this expires without data, we may do SRR.
818 */
819 fc_fcp_timer_set(fsp, 2);
820 return;
821 }
822 fsp->status_code = FC_DATA_OVRRUN;
823 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
824 "data len %x\n",
825 fsp->rport->port_id,
826 fsp->xfer_len, expected_len, fsp->data_len);
827 }
828 fc_fcp_complete_locked(fsp);
829 return;
830
831 len_err:
832 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
833 flags, fr_len(fp), respl, snsl);
834 err:
835 fsp->status_code = FC_ERROR;
836 fc_fcp_complete_locked(fsp);
837 }
838
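/*
 * Layout sketch of the response frame parsed by fc_fcp_resp(), in the
 * order the code above walks it:
 *
 *	struct fc_frame_header		always present
 *	struct fcp_resp			fr_status, fr_flags
 *	struct fcp_resp_ext		residual/length fields, examined only
 *					when fr_flags has RSP_LEN/SNS_LEN/RESID bits
 *	struct fcp_resp_rsp_info	if FCP_RSP_LEN_VAL
 *	sense data (snsl bytes)		if FCP_SNS_LEN_VAL, respl bytes after
 *					the rsp_info start
 */
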
839 /**
840 * fc_fcp_complete_locked() - complete processing of a fcp packet
841 * @fsp: fcp packet
842 *
843 * This function may sleep if a timer is pending. The packet lock must be
844 * held, and the host lock must not be held.
845 */
846 static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
847 {
848 struct fc_lport *lp = fsp->lp;
849 struct fc_seq *seq;
850 struct fc_exch *ep;
851 u32 f_ctl;
852
853 if (fsp->state & FC_SRB_ABORT_PENDING)
854 return;
855
856 if (fsp->state & FC_SRB_ABORTED) {
857 if (!fsp->status_code)
858 fsp->status_code = FC_CMD_ABORTED;
859 } else {
860 /*
861 * Test for transport underrun, independent of response
862 * underrun status.
863 */
864 if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
865 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
866 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
867 fsp->status_code = FC_DATA_UNDRUN;
868 fsp->io_status = 0;
869 }
870 }
871
872 seq = fsp->seq_ptr;
873 if (seq) {
874 fsp->seq_ptr = NULL;
875 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
876 struct fc_frame *conf_frame;
877 struct fc_seq *csp;
878
879 csp = lp->tt.seq_start_next(seq);
880 conf_frame = fc_frame_alloc(fsp->lp, 0);
881 if (conf_frame) {
882 f_ctl = FC_FC_SEQ_INIT;
883 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
884 ep = fc_seq_exch(seq);
885 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
886 ep->did, ep->sid,
887 FC_TYPE_FCP, f_ctl, 0);
888 lp->tt.seq_send(lp, csp, conf_frame);
889 }
890 }
891 lp->tt.exch_done(seq);
892 }
893 fc_io_compl(fsp);
894 }
895
896 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
897 {
898 struct fc_lport *lp = fsp->lp;
899
900 if (fsp->seq_ptr) {
901 lp->tt.exch_done(fsp->seq_ptr);
902 fsp->seq_ptr = NULL;
903 }
904 fsp->status_code = error;
905 }
906
907 /**
908 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
909 * @lp: logical port
910 * @id: target id
911 * @lun: lun
912 * @error: fsp status code
913 *
914 * If lun or id is -1, they are ignored.
915 */
916 static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
917 unsigned int lun, int error)
918 {
919 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
920 struct fc_fcp_pkt *fsp;
921 struct scsi_cmnd *sc_cmd;
922 unsigned long flags;
923
924 spin_lock_irqsave(lp->host->host_lock, flags);
925 restart:
926 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
927 sc_cmd = fsp->cmd;
928 if (id != -1 && scmd_id(sc_cmd) != id)
929 continue;
930
931 if (lun != -1 && sc_cmd->device->lun != lun)
932 continue;
933
934 fc_fcp_pkt_hold(fsp);
935 spin_unlock_irqrestore(lp->host->host_lock, flags);
936
937 if (!fc_fcp_lock_pkt(fsp)) {
938 fc_fcp_cleanup_cmd(fsp, error);
939 fc_io_compl(fsp);
940 fc_fcp_unlock_pkt(fsp);
941 }
942
943 fc_fcp_pkt_release(fsp);
944 spin_lock_irqsave(lp->host->host_lock, flags);
945 /*
946 * While we dropped the lock, multiple pkts could
947 * have been released, so we have to start over.
948 */
949 goto restart;
950 }
951 spin_unlock_irqrestore(lp->host->host_lock, flags);
952 }
953
954 static void fc_fcp_abort_io(struct fc_lport *lp)
955 {
956 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
957 }
958
959 /**
960 * fc_fcp_pkt_send() - send a fcp packet to the lower level.
961 * @lp: fc lport
962 * @fsp: fc packet.
963 *
964 * This is called by upper layer protocol.
965 * Return : zero for success and -1 for failure
966 * Context : called from queuecommand which can be called from process
967 * or scsi soft irq.
968 * Locks : called with the host lock and irqs disabled.
969 */
970 static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
971 {
972 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
973 int rc;
974
975 fsp->cmd->SCp.ptr = (char *)fsp;
976 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
977 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
978
979 int_to_scsilun(fsp->cmd->device->lun,
980 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
981 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
982 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
983
984 spin_unlock_irq(lp->host->host_lock);
985 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
986 spin_lock_irq(lp->host->host_lock);
987 if (rc)
988 list_del(&fsp->list);
989
990 return rc;
991 }
992
993 static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
994 void (*resp)(struct fc_seq *,
995 struct fc_frame *fp,
996 void *arg))
997 {
998 struct fc_frame *fp;
999 struct fc_seq *seq;
1000 struct fc_rport *rport;
1001 struct fc_rport_libfc_priv *rp;
1002 const size_t len = sizeof(fsp->cdb_cmd);
1003 int rc = 0;
1004
1005 if (fc_fcp_lock_pkt(fsp))
1006 return 0;
1007
1008 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
1009 if (!fp) {
1010 rc = -1;
1011 goto unlock;
1012 }
1013
1014 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1015 fr_cmd(fp) = fsp->cmd;
1016 rport = fsp->rport;
1017 fsp->max_payload = rport->maxframe_size;
1018 rp = rport->dd_data;
1019
1020 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1021 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1022 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1023
1024 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
1025 if (!seq) {
1026 fc_frame_free(fp);
1027 rc = -1;
1028 goto unlock;
1029 }
1030 fsp->last_pkt_time = jiffies;
1031 fsp->seq_ptr = seq;
1032 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1033
1034 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1035 fc_fcp_timer_set(fsp,
1036 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
1037 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
1038 unlock:
1039 fc_fcp_unlock_pkt(fsp);
1040 return rc;
1041 }
1042
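/*
 * Timer choice above, restated (sketch): targets that advertised REC
 * support (FC_RP_FLAGS_REC_SUPPORTED) get the shorter FC_SCSI_REC_TOV
 * (2 * HZ) timer, after which fc_fcp_timeout() issues a REC; others fall
 * back to the longer FC_SCSI_ER_TIMEOUT (10 * HZ) progress-check path.
 */
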
1043 /*
1044 * transport error handler
1045 */
1046 static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1047 {
1048 int error = PTR_ERR(fp);
1049
1050 if (fc_fcp_lock_pkt(fsp))
1051 return;
1052
1053 switch (error) {
1054 case -FC_EX_CLOSED:
1055 fc_fcp_retry_cmd(fsp);
1056 goto unlock;
1057 default:
1058 FC_DBG("unknown error %ld\n", PTR_ERR(fp));
1059 }
1060 /*
1061 * clear abort pending, because the lower layer
1062 * decided to force completion.
1063 */
1064 fsp->state &= ~FC_SRB_ABORT_PENDING;
1065 fsp->status_code = FC_CMD_PLOGO;
1066 fc_fcp_complete_locked(fsp);
1067 unlock:
1068 fc_fcp_unlock_pkt(fsp);
1069 }
1070
1071 /*
1072 * Scsi abort handler- calls to send an abort
1073 * and then wait for abort completion
1074 */
1075 static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1076 {
1077 int rc = FAILED;
1078
1079 if (fc_fcp_send_abort(fsp))
1080 return FAILED;
1081
1082 init_completion(&fsp->tm_done);
1083 fsp->wait_for_comp = 1;
1084
1085 spin_unlock_bh(&fsp->scsi_pkt_lock);
1086 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1087 spin_lock_bh(&fsp->scsi_pkt_lock);
1088 fsp->wait_for_comp = 0;
1089
1090 if (!rc) {
1091 FC_DBG("target abort cmd failed\n");
1092 rc = FAILED;
1093 } else if (fsp->state & FC_SRB_ABORTED) {
1094 FC_DBG("target abort cmd passed\n");
1095 rc = SUCCESS;
1096 fc_fcp_complete_locked(fsp);
1097 }
1098
1099 return rc;
1100 }
1101
1102 /*
1103 * Retry LUN reset after resource allocation failed.
1104 */
1105 static void fc_lun_reset_send(unsigned long data)
1106 {
1107 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1108 struct fc_lport *lp = fsp->lp;
1109 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
1110 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1111 return;
1112 if (fc_fcp_lock_pkt(fsp))
1113 return;
1114 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1115 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1116 fc_fcp_unlock_pkt(fsp);
1117 }
1118 }
1119
1120 /*
1121 * Scsi device reset handler- send a LUN RESET to the device
1122 * and wait for reset reply
1123 */
1124 static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1125 unsigned int id, unsigned int lun)
1126 {
1127 int rc;
1128
1129 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
1130 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
1131 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1132
1133 fsp->wait_for_comp = 1;
1134 init_completion(&fsp->tm_done);
1135
1136 fc_lun_reset_send((unsigned long)fsp);
1137
1138 /*
1139 * wait for completion of reset
1140 * after that make sure all commands are terminated
1141 */
1142 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1143
1144 spin_lock_bh(&fsp->scsi_pkt_lock);
1145 fsp->state |= FC_SRB_COMPL;
1146 spin_unlock_bh(&fsp->scsi_pkt_lock);
1147
1148 del_timer_sync(&fsp->timer);
1149
1150 spin_lock_bh(&fsp->scsi_pkt_lock);
1151 if (fsp->seq_ptr) {
1152 lp->tt.exch_done(fsp->seq_ptr);
1153 fsp->seq_ptr = NULL;
1154 }
1155 fsp->wait_for_comp = 0;
1156 spin_unlock_bh(&fsp->scsi_pkt_lock);
1157
1158 if (!rc) {
1159 FC_DBG("lun reset failed\n");
1160 return FAILED;
1161 }
1162
1163 /* cdb_status holds the tmf's rsp code */
1164 if (fsp->cdb_status != FCP_TMF_CMPL)
1165 return FAILED;
1166
1167 FC_DBG("lun reset to lun %u completed\n", lun);
1168 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
1169 return SUCCESS;
1170 }
1171
1172 /*
1173 * Task Management response handler
1174 */
1175 static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1176 {
1177 struct fc_fcp_pkt *fsp = arg;
1178 struct fc_frame_header *fh;
1179
1180 if (IS_ERR(fp)) {
1181 /*
1182 * If there is an error just let it time out or wait
1183 * for the TMF to be aborted if it timed out.
1184 *
1185 * scsi-eh will escalate when either happens.
1186 */
1187 return;
1188 }
1189
1190 if (fc_fcp_lock_pkt(fsp))
1191 return;
1192
1193 /*
1194 * raced with eh timeout handler.
1195 */
1196 if (!fsp->seq_ptr || !fsp->wait_for_comp) {
1197 spin_unlock_bh(&fsp->scsi_pkt_lock);
1198 return;
1199 }
1200
1201 fh = fc_frame_header_get(fp);
1202 if (fh->fh_type != FC_TYPE_BLS)
1203 fc_fcp_resp(fsp, fp);
1204 fsp->seq_ptr = NULL;
1205 fsp->lp->tt.exch_done(seq);
1206 fc_frame_free(fp);
1207 fc_fcp_unlock_pkt(fsp);
1208 }
1209
1210 static void fc_fcp_cleanup(struct fc_lport *lp)
1211 {
1212 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
1213 }
1214
1215 /*
1216 * fc_fcp_timeout: called by OS timer function.
1217 *
1218 * The timer has been inactivated and must be reactivated if desired
1219 * using fc_fcp_timer_set().
1220 *
1221 * Algorithm:
1222 *
1223 * If REC is supported, just issue it, and return. The REC exchange will
1224 * complete or time out, and recovery can continue at that point.
1225 *
1226 * Otherwise, if the response has been received without all the data,
1227 * it has been ER_TIMEOUT since the response was received.
1228 *
1229 * If the response has not been received,
1230 * we see if data was received recently. If it has been, we continue waiting,
1231 * otherwise, we abort the command.
1232 */
1233 static void fc_fcp_timeout(unsigned long data)
1234 {
1235 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1236 struct fc_rport *rport = fsp->rport;
1237 struct fc_rport_libfc_priv *rp = rport->dd_data;
1238
1239 if (fc_fcp_lock_pkt(fsp))
1240 return;
1241
1242 if (fsp->cdb_cmd.fc_tm_flags)
1243 goto unlock;
1244
1245 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1246
1247 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
1248 fc_fcp_rec(fsp);
1249 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1250 jiffies))
1251 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1252 else if (fsp->state & FC_SRB_RCV_STATUS)
1253 fc_fcp_complete_locked(fsp);
1254 else
1255 fc_timeout_error(fsp);
1256 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1257 unlock:
1258 fc_fcp_unlock_pkt(fsp);
1259 }
1260
1261 /*
1262 * Send a REC ELS request
1263 */
1264 static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1265 {
1266 struct fc_lport *lp;
1267 struct fc_frame *fp;
1268 struct fc_rport *rport;
1269 struct fc_rport_libfc_priv *rp;
1270
1271 lp = fsp->lp;
1272 rport = fsp->rport;
1273 rp = rport->dd_data;
1274 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
1275 fsp->status_code = FC_HRD_ERROR;
1276 fsp->io_status = 0;
1277 fc_fcp_complete_locked(fsp);
1278 return;
1279 }
1280 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
1281 if (!fp)
1282 goto retry;
1283
1284 fr_seq(fp) = fsp->seq_ptr;
1285 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1286 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
1287 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1288 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
1289 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1290 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1291 return;
1292 }
1293 fc_frame_free(fp);
1294 retry:
1295 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1296 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1297 else
1298 fc_timeout_error(fsp);
1299 }
1300
1301 /*
1302 * Receive handler for REC ELS frame
1303 * If it is a reject then let the scsi layer handle
1304 * the timeout. If it is an LS_ACC then, if the I/O was not completed,
1305 * set the timeout and return; otherwise complete the exchange
1306 * and tell the scsi layer to restart the I/O.
1307 */
1308 static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1309 {
1310 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
1311 struct fc_els_rec_acc *recp;
1312 struct fc_els_ls_rjt *rjt;
1313 u32 e_stat;
1314 u8 opcode;
1315 u32 offset;
1316 enum dma_data_direction data_dir;
1317 enum fc_rctl r_ctl;
1318 struct fc_rport_libfc_priv *rp;
1319
1320 if (IS_ERR(fp)) {
1321 fc_fcp_rec_error(fsp, fp);
1322 return;
1323 }
1324
1325 if (fc_fcp_lock_pkt(fsp))
1326 goto out;
1327
1328 fsp->recov_retry = 0;
1329 opcode = fc_frame_payload_op(fp);
1330 if (opcode == ELS_LS_RJT) {
1331 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1332 switch (rjt->er_reason) {
1333 default:
1334 FC_DEBUG_FCP("device %x unexpected REC reject "
1335 "reason %d expl %d\n",
1336 fsp->rport->port_id, rjt->er_reason,
1337 rjt->er_explan);
1338 /* fall through */
1339 case ELS_RJT_UNSUP:
1340 FC_DEBUG_FCP("device does not support REC\n");
1341 rp = fsp->rport->dd_data;
1342 /*
1343 * If the target does not support REC, or gave some bogus
1344 * reject reason, then reset the timer so we keep checking
1345 * for progress.
1346 */
1347 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1348 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1349 break;
1350 case ELS_RJT_LOGIC:
1351 case ELS_RJT_UNAB:
1352 /*
1353 * If no data transfer, the command frame got dropped
1354 * so we just retry. If data was transferred, we
1355 * lost the response but the target has no record,
1356 * so we abort and retry.
1357 */
1358 if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
1359 fsp->xfer_len == 0) {
1360 fc_fcp_retry_cmd(fsp);
1361 break;
1362 }
1363 fc_timeout_error(fsp);
1364 break;
1365 }
1366 } else if (opcode == ELS_LS_ACC) {
1367 if (fsp->state & FC_SRB_ABORTED)
1368 goto unlock_out;
1369
1370 data_dir = fsp->cmd->sc_data_direction;
1371 recp = fc_frame_payload_get(fp, sizeof(*recp));
1372 offset = ntohl(recp->reca_fc4value);
1373 e_stat = ntohl(recp->reca_e_stat);
1374
1375 if (e_stat & ESB_ST_COMPLETE) {
1376
1377 /*
1378 * The exchange is complete.
1379 *
1380 * For output, we must've lost the response.
1381 * For input, all data must've been sent.
1382 * We may have lost the response
1383 * (and a confirmation was requested) and maybe
1384 * some data.
1385 *
1386 * If all data received, send SRR
1387 * asking for response. If partial data received,
1388 * or gaps, SRR requests data at start of gap.
1389 * Recovery via SRR relies on in-order-delivery.
1390 */
1391 if (data_dir == DMA_TO_DEVICE) {
1392 r_ctl = FC_RCTL_DD_CMD_STATUS;
1393 } else if (fsp->xfer_contig_end == offset) {
1394 r_ctl = FC_RCTL_DD_CMD_STATUS;
1395 } else {
1396 offset = fsp->xfer_contig_end;
1397 r_ctl = FC_RCTL_DD_SOL_DATA;
1398 }
1399 fc_fcp_srr(fsp, r_ctl, offset);
1400 } else if (e_stat & ESB_ST_SEQ_INIT) {
1401
1402 /*
1403 * The remote port has the initiative, so just
1404 * keep waiting for it to complete.
1405 */
1406 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1407 } else {
1408
1409 /*
1410 * The exchange is incomplete, we have seq. initiative.
1411 * Lost response with requested confirmation,
1412 * lost confirmation, lost transfer ready or
1413 * lost write data.
1414 *
1415 * For output, if not all data was received, ask
1416 * for transfer ready to be repeated.
1417 *
1418 * If we received or sent all the data, send SRR to
1419 * request response.
1420 *
1421 * If we lost a response, we may have lost some read
1422 * data as well.
1423 */
1424 r_ctl = FC_RCTL_DD_SOL_DATA;
1425 if (data_dir == DMA_TO_DEVICE) {
1426 r_ctl = FC_RCTL_DD_CMD_STATUS;
1427 if (offset < fsp->data_len)
1428 r_ctl = FC_RCTL_DD_DATA_DESC;
1429 } else if (offset == fsp->xfer_contig_end) {
1430 r_ctl = FC_RCTL_DD_CMD_STATUS;
1431 } else if (fsp->xfer_contig_end < offset) {
1432 offset = fsp->xfer_contig_end;
1433 }
1434 fc_fcp_srr(fsp, r_ctl, offset);
1435 }
1436 }
1437 unlock_out:
1438 fc_fcp_unlock_pkt(fsp);
1439 out:
1440 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1441 fc_frame_free(fp);
1442 }
1443
1444 /*
1445 * Handle error response or timeout for REC exchange.
1446 */
1447 static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1448 {
1449 int error = PTR_ERR(fp);
1450
1451 if (fc_fcp_lock_pkt(fsp))
1452 goto out;
1453
1454 switch (error) {
1455 case -FC_EX_CLOSED:
1456 fc_fcp_retry_cmd(fsp);
1457 break;
1458
1459 default:
1460 FC_DBG("REC %p fid %x error unexpected error %d\n",
1461 fsp, fsp->rport->port_id, error);
1462 fsp->status_code = FC_CMD_PLOGO;
1463 /* fall through */
1464
1465 case -FC_EX_TIMEOUT:
1466 /*
1467 * Assume REC or LS_ACC was lost.
1468 * The exchange manager will have aborted REC, so retry.
1469 */
1470 FC_DBG("REC fid %x error error %d retry %d/%d\n",
1471 fsp->rport->port_id, error, fsp->recov_retry,
1472 FC_MAX_RECOV_RETRY);
1473 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1474 fc_fcp_rec(fsp);
1475 else
1476 fc_timeout_error(fsp);
1477 break;
1478 }
1479 fc_fcp_unlock_pkt(fsp);
1480 out:
1481 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1482 }
1483
1484 /*
1485 * Timeout error routine:
1486 * aborts the I/O, closes the exchange and
1487 * sends a completion notification to the scsi layer
1488 */
1489 static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1490 {
1491 fsp->status_code = FC_CMD_TIME_OUT;
1492 fsp->cdb_status = 0;
1493 fsp->io_status = 0;
1494 /*
1495 * if this fails then we let the scsi command timer fire and
1496 * scsi-ml escalate.
1497 */
1498 fc_fcp_send_abort(fsp);
1499 }
1500
1501 /*
1502 * Sequence retransmission request.
1503 * This is called after receiving status but insufficient data, or
1504 * when expecting status but the request has timed out.
1505 */
1506 static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1507 {
1508 struct fc_lport *lp = fsp->lp;
1509 struct fc_rport *rport;
1510 struct fc_rport_libfc_priv *rp;
1511 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1512 struct fc_seq *seq;
1513 struct fcp_srr *srr;
1514 struct fc_frame *fp;
1515 u8 cdb_op;
1516
1517 rport = fsp->rport;
1518 rp = rport->dd_data;
1519 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1520
1521 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
1522 goto retry; /* shouldn't happen */
1523 fp = fc_frame_alloc(lp, sizeof(*srr));
1524 if (!fp)
1525 goto retry;
1526
1527 srr = fc_frame_payload_get(fp, sizeof(*srr));
1528 memset(srr, 0, sizeof(*srr));
1529 srr->srr_op = ELS_SRR;
1530 srr->srr_ox_id = htons(ep->oxid);
1531 srr->srr_rx_id = htons(ep->rxid);
1532 srr->srr_r_ctl = r_ctl;
1533 srr->srr_rel_off = htonl(offset);
1534
1535 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1536 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1537 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1538
1539 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
1540 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1541 if (!seq) {
1542 fc_frame_free(fp);
1543 goto retry;
1544 }
1545 fsp->recov_seq = seq;
1546 fsp->xfer_len = offset;
1547 fsp->xfer_contig_end = offset;
1548 fsp->state &= ~FC_SRB_RCV_STATUS;
1549 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
1550 return;
1551 retry:
1552 fc_fcp_retry_cmd(fsp);
1553 }
1554
1555 /*
1556 * Handle response from SRR.
1557 */
1558 static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1559 {
1560 struct fc_fcp_pkt *fsp = arg;
1561 struct fc_frame_header *fh;
1562
1563 if (IS_ERR(fp)) {
1564 fc_fcp_srr_error(fsp, fp);
1565 return;
1566 }
1567
1568 if (fc_fcp_lock_pkt(fsp))
1569 goto out;
1570
1571 fh = fc_frame_header_get(fp);
1572 /*
1573 * BUG? fc_fcp_srr_error calls exch_done which would release
1574 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
1575 * then fc_exch_timeout would be sending an abort. The exch_done
1576 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
1577 * an abort response though.
1578 */
1579 if (fh->fh_type == FC_TYPE_BLS) {
1580 fc_fcp_unlock_pkt(fsp);
1581 return;
1582 }
1583
1584 fsp->recov_seq = NULL;
1585 switch (fc_frame_payload_op(fp)) {
1586 case ELS_LS_ACC:
1587 fsp->recov_retry = 0;
1588 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1589 break;
1590 case ELS_LS_RJT:
1591 default:
1592 fc_timeout_error(fsp);
1593 break;
1594 }
1595 fc_fcp_unlock_pkt(fsp);
1596 fsp->lp->tt.exch_done(seq);
1597 out:
1598 fc_frame_free(fp);
1599 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1600 }
1601
1602 static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1603 {
1604 if (fc_fcp_lock_pkt(fsp))
1605 goto out;
1606 fsp->lp->tt.exch_done(fsp->recov_seq);
1607 fsp->recov_seq = NULL;
1608 switch (PTR_ERR(fp)) {
1609 case -FC_EX_TIMEOUT:
1610 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1611 fc_fcp_rec(fsp);
1612 else
1613 fc_timeout_error(fsp);
1614 break;
1615 case -FC_EX_CLOSED: /* e.g., link failure */
1616 /* fall through */
1617 default:
1618 fc_fcp_retry_cmd(fsp);
1619 break;
1620 }
1621 fc_fcp_unlock_pkt(fsp);
1622 out:
1623 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1624 }
1625
1626 static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
1627 {
1628 /* lock ? */
1629 return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
1630 }
1631
1632 /**
1633 * fc_queuecommand - The queuecommand function of the scsi template
1634 * @cmd: struct scsi_cmnd to be executed
1635 * @done: Callback function to be called when cmd is completed
1636 *
1637 * This is the I/O strategy routine, called by the scsi layer.
1638 * This routine is called while holding the host_lock.
1639 */
1640 int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1641 {
1642 struct fc_lport *lp;
1643 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1644 struct fc_fcp_pkt *fsp;
1645 struct fc_rport_libfc_priv *rp;
1646 int rval;
1647 int rc = 0;
1648 struct fcoe_dev_stats *stats;
1649
1650 lp = shost_priv(sc_cmd->device->host);
1651
1652 rval = fc_remote_port_chkready(rport);
1653 if (rval) {
1654 sc_cmd->result = rval;
1655 done(sc_cmd);
1656 goto out;
1657 }
1658
1659 if (!*(struct fc_remote_port **)rport->dd_data) {
1660 /*
1661 * rport is transitioning from blocked/deleted to
1662 * online
1663 */
1664 sc_cmd->result = DID_IMM_RETRY << 16;
1665 done(sc_cmd);
1666 goto out;
1667 }
1668
1669 rp = rport->dd_data;
1670
1671 if (!fc_fcp_lport_queue_ready(lp)) {
1672 rc = SCSI_MLQUEUE_HOST_BUSY;
1673 goto out;
1674 }
1675
1676 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1677 if (fsp == NULL) {
1678 rc = SCSI_MLQUEUE_HOST_BUSY;
1679 goto out;
1680 }
1681
1682 /*
1683 * build the libfc request pkt
1684 */
1685 fsp->cmd = sc_cmd; /* save the cmd */
1686 fsp->lp = lp; /* save the softc ptr */
1687 fsp->rport = rport; /* set the remote port ptr */
1688 sc_cmd->scsi_done = done;
1689
1690 /*
1691 * set up the transfer length
1692 */
1693 fsp->data_len = scsi_bufflen(sc_cmd);
1694 fsp->xfer_len = 0;
1695
1696 /*
1697 * setup the data direction
1698 */
1699 stats = lp->dev_stats[smp_processor_id()];
1700 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1701 fsp->req_flags = FC_SRB_READ;
1702 stats->InputRequests++;
1703 stats->InputMegabytes = fsp->data_len;
1704 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1705 fsp->req_flags = FC_SRB_WRITE;
1706 stats->OutputRequests++;
1707 stats->OutputMegabytes = fsp->data_len;
1708 } else {
1709 fsp->req_flags = 0;
1710 stats->ControlRequests++;
1711 }
1712
1713 fsp->tgt_flags = rp->flags;
1714
1715 init_timer(&fsp->timer);
1716 fsp->timer.data = (unsigned long)fsp;
1717
1718 /*
1719 * Send it to the lower layer.
1720 * If the send fails, release the packet and return
1721 * SCSI_MLQUEUE_HOST_BUSY so scsi-ml retries the request.
1722 */
1723 rval = fc_fcp_pkt_send(lp, fsp);
1724 if (rval != 0) {
1725 fsp->state = FC_SRB_FREE;
1726 fc_fcp_pkt_release(fsp);
1727 rc = SCSI_MLQUEUE_HOST_BUSY;
1728 }
1729 out:
1730 return rc;
1731 }
1732 EXPORT_SYMBOL(fc_queuecommand);
1733
1734 /**
1735 * fc_io_compl() - Handle responses for completed commands
1736 * @fsp: scsi packet
1737 *
1738 * Translates an error to a Linux SCSI error.
1739 *
1740 * The fcp packet lock must be held when calling.
1741 */
1742 static void fc_io_compl(struct fc_fcp_pkt *fsp)
1743 {
1744 struct fc_fcp_internal *si;
1745 struct scsi_cmnd *sc_cmd;
1746 struct fc_lport *lp;
1747 unsigned long flags;
1748
1749 fsp->state |= FC_SRB_COMPL;
1750 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1751 spin_unlock_bh(&fsp->scsi_pkt_lock);
1752 del_timer_sync(&fsp->timer);
1753 spin_lock_bh(&fsp->scsi_pkt_lock);
1754 }
1755
1756 lp = fsp->lp;
1757 si = fc_get_scsi_internal(lp);
1758 spin_lock_irqsave(lp->host->host_lock, flags);
1759 if (!fsp->cmd) {
1760 spin_unlock_irqrestore(lp->host->host_lock, flags);
1761 return;
1762 }
1763
1764 /*
1765 * If a command timed out while we had to try and throttle IO
1766 * and it is now getting cleaned up, then we are about to
1767 * try again so clear the throttled flag in case we get more
1768 * timeouts.
1769 */
1770 if (si->throttled && fsp->state & FC_SRB_NOMEM)
1771 si->throttled = 0;
1772
1773 sc_cmd = fsp->cmd;
1774 fsp->cmd = NULL;
1775
1776 if (!sc_cmd->SCp.ptr) {
1777 spin_unlock_irqrestore(lp->host->host_lock, flags);
1778 return;
1779 }
1780
1781 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1782 switch (fsp->status_code) {
1783 case FC_COMPLETE:
1784 if (fsp->cdb_status == 0) {
1785 /*
1786 * good I/O status
1787 */
1788 sc_cmd->result = DID_OK << 16;
1789 if (fsp->scsi_resid)
1790 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1791 } else if (fsp->cdb_status == QUEUE_FULL) {
1792 struct scsi_device *tmp_sdev;
1793 struct scsi_device *sdev = sc_cmd->device;
1794
1795 shost_for_each_device(tmp_sdev, sdev->host) {
1796 if (tmp_sdev->id != sdev->id)
1797 continue;
1798
1799 if (tmp_sdev->queue_depth > 1) {
1800 scsi_track_queue_full(tmp_sdev,
1801 tmp_sdev->
1802 queue_depth - 1);
1803 }
1804 }
1805 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1806 } else {
1807 /*
1808 * transport level I/O was ok but scsi
1809 * has non zero status
1810 */
1811 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1812 }
1813 break;
1814 case FC_ERROR:
1815 sc_cmd->result = DID_ERROR << 16;
1816 break;
1817 case FC_DATA_UNDRUN:
1818 if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
1819 /*
1820 * scsi status is good but transport level
1821 * underrun.
1822 */
1823 sc_cmd->result = DID_OK << 16;
1824 } else {
1825 /*
1826 * scsi got underrun, this is an error
1827 */
1828 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1829 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1830 }
1831 break;
1832 case FC_DATA_OVRRUN:
1833 /*
1834 * overrun is an error
1835 */
1836 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1837 break;
1838 case FC_CMD_ABORTED:
1839 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
1840 break;
1841 case FC_CMD_TIME_OUT:
1842 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1843 break;
1844 case FC_CMD_RESET:
1845 sc_cmd->result = (DID_RESET << 16);
1846 break;
1847 case FC_HRD_ERROR:
1848 sc_cmd->result = (DID_NO_CONNECT << 16);
1849 break;
1850 default:
1851 sc_cmd->result = (DID_ERROR << 16);
1852 break;
1853 }
1854
1855 list_del(&fsp->list);
1856 sc_cmd->SCp.ptr = NULL;
1857 sc_cmd->scsi_done(sc_cmd);
1858 spin_unlock_irqrestore(lp->host->host_lock, flags);
1859
1860 /* release ref from initial allocation in queue command */
1861 fc_fcp_pkt_release(fsp);
1862 }
1863
1864 /**
1865 * fc_fcp_complete() - complete processing of a fcp packet
1866 * @fsp: fcp packet
1867 *
1868 * This function may sleep if a fsp timer is pending.
1869 * The host lock must not be held by caller.
1870 */
1871 void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1872 {
1873 if (fc_fcp_lock_pkt(fsp))
1874 return;
1875
1876 fc_fcp_complete_locked(fsp);
1877 fc_fcp_unlock_pkt(fsp);
1878 }
1879 EXPORT_SYMBOL(fc_fcp_complete);
1880
1881 /**
1882 * fc_eh_abort() - Abort a command
1883 * @sc_cmd: scsi command to abort
1884 *
1885 * From scsi host template.
1886 * Send an ABTS to the target device and wait for the response.
1887 * sc_cmd is the pointer to the command to be aborted.
1888 */
1889 int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1890 {
1891 struct fc_fcp_pkt *fsp;
1892 struct fc_lport *lp;
1893 int rc = FAILED;
1894 unsigned long flags;
1895
1896 lp = shost_priv(sc_cmd->device->host);
1897 if (lp->state != LPORT_ST_READY)
1898 return rc;
1899 else if (!lp->link_up)
1900 return rc;
1901
1902 spin_lock_irqsave(lp->host->host_lock, flags);
1903 fsp = CMD_SP(sc_cmd);
1904 if (!fsp) {
1905 /* command completed while scsi eh was setting up */
1906 spin_unlock_irqrestore(lp->host->host_lock, flags);
1907 return SUCCESS;
1908 }
1909 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
1910 fc_fcp_pkt_hold(fsp);
1911 spin_unlock_irqrestore(lp->host->host_lock, flags);
1912
1913 if (fc_fcp_lock_pkt(fsp)) {
1914 /* completed while we were waiting for timer to be deleted */
1915 rc = SUCCESS;
1916 goto release_pkt;
1917 }
1918
1919 rc = fc_fcp_pkt_abort(lp, fsp);
1920 fc_fcp_unlock_pkt(fsp);
1921
1922 release_pkt:
1923 fc_fcp_pkt_release(fsp);
1924 return rc;
1925 }
1926 EXPORT_SYMBOL(fc_eh_abort);
1927
1928 /**
1929 * fc_eh_device_reset() - Reset a single LUN
1930 * @sc_cmd: scsi command
1931 *
1932 * Set from the scsi host template to send a TMF to the target and wait for the
1933 * response.
1934 */
1935 int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1936 {
1937 struct fc_lport *lp;
1938 struct fc_fcp_pkt *fsp;
1939 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1940 int rc = FAILED;
1941 struct fc_rport_libfc_priv *rp;
1942 int rval;
1943
1944 rval = fc_remote_port_chkready(rport);
1945 if (rval)
1946 goto out;
1947
1948 rp = rport->dd_data;
1949 lp = shost_priv(sc_cmd->device->host);
1950
1951 if (lp->state != LPORT_ST_READY)
1952 return rc;
1953
1954 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1955 if (fsp == NULL) {
1956 FC_DBG("could not allocate scsi_pkt\n");
1957 sc_cmd->result = DID_NO_CONNECT << 16;
1958 goto out;
1959 }
1960
1961 /*
1962 * Build the libfc request pkt. Do not set the scsi cmnd, because
1963 * the sc passed in is not setup for execution like when sent
1964 * through the queuecommand callout.
1965 */
1966 fsp->lp = lp; /* save the softc ptr */
1967 fsp->rport = rport; /* set the remote port ptr */
1968
1969 /*
1970 * flush outstanding commands
1971 */
1972 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
1973 fsp->state = FC_SRB_FREE;
1974 fc_fcp_pkt_release(fsp);
1975
1976 out:
1977 return rc;
1978 }
1979 EXPORT_SYMBOL(fc_eh_device_reset);
1980
1981 /**
1982 * fc_eh_host_reset() - The reset function will reset the ports on the host.
1983 * @sc_cmd: scsi command
1984 */
1985 int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
1986 {
1987 struct Scsi_Host *shost = sc_cmd->device->host;
1988 struct fc_lport *lp = shost_priv(shost);
1989 unsigned long wait_tmo;
1990
1991 lp->tt.lport_reset(lp);
1992 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
1993 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
1994 msleep(1000);
1995
1996 if (fc_fcp_lport_queue_ready(lp)) {
1997 shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
1998 return SUCCESS;
1999 } else {
2000 shost_printk(KERN_INFO, shost, "Host reset failed. "
2001 "lport not ready.\n");
2002 return FAILED;
2003 }
2004 }
2005 EXPORT_SYMBOL(fc_eh_host_reset);
2006
2007 /**
2008 * fc_slave_alloc() - configure queue depth
2009 * @sdev: scsi device
2010 *
2011 * Configures queue depth based on host's cmd_per_lun. If not set
2012 * then we use the libfc default.
2013 */
2014 int fc_slave_alloc(struct scsi_device *sdev)
2015 {
2016 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2017 int queue_depth;
2018
2019 if (!rport || fc_remote_port_chkready(rport))
2020 return -ENXIO;
2021
2022 if (sdev->tagged_supported) {
2023 if (sdev->host->hostt->cmd_per_lun)
2024 queue_depth = sdev->host->hostt->cmd_per_lun;
2025 else
2026 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
2027 scsi_activate_tcq(sdev, queue_depth);
2028 }
2029 return 0;
2030 }
2031 EXPORT_SYMBOL(fc_slave_alloc);
2032
2033 int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2034 {
2035 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2036 return sdev->queue_depth;
2037 }
2038 EXPORT_SYMBOL(fc_change_queue_depth);
2039
2040 int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2041 {
2042 if (sdev->tagged_supported) {
2043 scsi_set_tag_type(sdev, tag_type);
2044 if (tag_type)
2045 scsi_activate_tcq(sdev, sdev->queue_depth);
2046 else
2047 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2048 } else
2049 tag_type = 0;
2050
2051 return tag_type;
2052 }
2053 EXPORT_SYMBOL(fc_change_queue_type);
2054
2055 void fc_fcp_destroy(struct fc_lport *lp)
2056 {
2057 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2058
2059 if (!list_empty(&si->scsi_pkt_queue))
2060 printk(KERN_ERR "Leaked scsi packets.\n");
2061
2062 mempool_destroy(si->scsi_pkt_pool);
2063 kfree(si);
2064 lp->scsi_priv = NULL;
2065 }
2066 EXPORT_SYMBOL(fc_fcp_destroy);
2067
2068 int fc_fcp_init(struct fc_lport *lp)
2069 {
2070 int rc;
2071 struct fc_fcp_internal *si;
2072
2073 if (!lp->tt.fcp_cmd_send)
2074 lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
2075
2076 if (!lp->tt.fcp_cleanup)
2077 lp->tt.fcp_cleanup = fc_fcp_cleanup;
2078
2079 if (!lp->tt.fcp_abort_io)
2080 lp->tt.fcp_abort_io = fc_fcp_abort_io;
2081
2082 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2083 if (!si)
2084 return -ENOMEM;
2085 lp->scsi_priv = si;
2086 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2087
2088 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2089 if (!si->scsi_pkt_pool) {
2090 rc = -ENOMEM;
2091 goto free_internal;
2092 }
2093 return 0;
2094
2095 free_internal:
2096 kfree(si);
2097 return rc;
2098 }
2099 EXPORT_SYMBOL(fc_fcp_init);
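/*
 * Hook-up sketch for an LLD (illustrative only; the field names follow the
 * SCSI midlayer of this era, and the numeric limits are made up):
 *
 *	static struct scsi_host_template my_fc_sht = {
 *		.module			= THIS_MODULE,
 *		.name			= "my_fc_lld",
 *		.queuecommand		= fc_queuecommand,
 *		.eh_abort_handler	= fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	= fc_eh_host_reset,
 *		.slave_alloc		= fc_slave_alloc,
 *		.change_queue_depth	= fc_change_queue_depth,
 *		.change_queue_type	= fc_change_queue_type,
 *		.this_id		= -1,
 *		.cmd_per_lun		= 32,
 *		.can_queue		= 1024,
 *		.sg_tablesize		= SG_ALL,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 *
 * followed by a call to fc_fcp_init(lp) once the lport is allocated, so
 * that lp->tt.fcp_cmd_send and friends are filled in.
 */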
2100
2101 static int __init libfc_init(void)
2102 {
2103 int rc;
2104
2105 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2106 sizeof(struct fc_fcp_pkt),
2107 0, SLAB_HWCACHE_ALIGN, NULL);
2108 if (scsi_pkt_cachep == NULL) {
2109 FC_DBG("Unable to allocate SRB cache...module load failed!");
2110 return -ENOMEM;
2111 }
2112
2113 rc = fc_setup_exch_mgr();
2114 if (rc)
2115 goto destroy_pkt_cache;
2116
2117 rc = fc_setup_rport();
2118 if (rc)
2119 goto destroy_em;
2120
2121 return rc;
2122 destroy_em:
2123 fc_destroy_exch_mgr();
2124 destroy_pkt_cache:
2125 kmem_cache_destroy(scsi_pkt_cachep);
2126 return rc;
2127 }
2128
2129 static void __exit libfc_exit(void)
2130 {
2131 kmem_cache_destroy(scsi_pkt_cachep);
2132 fc_destroy_exch_mgr();
2133 fc_destroy_rport();
2134 }
2135
2136 module_init(libfc_init);
2137 module_exit(libfc_exit);