drivers/scsi/iscsi_tcp.c
1/*
2 * iSCSI Initiator over TCP/IP Data-Path
3 *
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 - 2006 Mike Christie
7 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
8 * maintained by open-iscsi@googlegroups.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published
12 * by the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * See the file COPYING included with this distribution for more details.
21 *
22 * Credits:
23 * Christoph Hellwig
24 * FUJITA Tomonori
25 * Arne Redlich
26 * Zhenyu Wang
27 */
28
29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/inet.h>
32#include <linux/file.h>
33#include <linux/blkdev.h>
34#include <linux/crypto.h>
35#include <linux/delay.h>
36#include <linux/kfifo.h>
37#include <linux/scatterlist.h>
38#include <net/tcp.h>
39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_host.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_transport_iscsi.h>
44
45#include "iscsi_tcp.h"
46
47MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
48 "Alex Aizman <itn780@yahoo.com>");
49MODULE_DESCRIPTION("iSCSI/TCP data-path");
50MODULE_LICENSE("GPL");
51#undef DEBUG_TCP
52#define DEBUG_ASSERT
53
54#ifdef DEBUG_TCP
55#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
56#else
57#define debug_tcp(fmt...)
58#endif
59
60#ifndef DEBUG_ASSERT
61#ifdef BUG_ON
62#undef BUG_ON
63#endif
64#define BUG_ON(expr)
65#endif
66
67static struct scsi_transport_template *iscsi_tcp_scsi_transport;
68static struct scsi_host_template iscsi_sht;
69static struct iscsi_transport iscsi_tcp_transport;
70
71static unsigned int iscsi_max_lun = 512;
72module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
73
74static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
75 struct iscsi_segment *segment);
76
77/*
78 * Scatterlist handling: inside the iscsi_segment, we
79 * remember an index into the scatterlist, and set data/size
80 * to the current scatterlist entry. For highmem pages, we
81 * kmap as needed.
82 *
83 * Note that the page is unmapped when we return from
84 * TCP's data_ready handler, so we may end up mapping and
85 * unmapping the same page repeatedly. The whole reason
86 * for this is that we shouldn't keep the page mapped
87 * outside the softirq.
88 */
89
90/**
91 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
92 * @segment: the buffer object
93 * @sg: scatterlist
94 * @offset: byte offset into that sg entry
95 *
96 * This function sets up the segment so that subsequent
97 * data is copied to the indicated sg entry, at the given
98 * offset.
99 */
100static inline void
101iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
102 struct scatterlist *sg, unsigned int offset)
103{
104 segment->sg = sg;
105 segment->sg_offset = offset;
106 segment->size = min(sg->length - offset,
107 segment->total_size - segment->total_copied);
108 segment->data = NULL;
109}
110
111/**
112 * iscsi_tcp_segment_map - map the current S/G page
113 * @segment: iscsi_segment
114 * @recv: 1 if called from recv path
115 *
116 * We only need to possibly kmap data if scatter lists are being used,
117 * because the iscsi passthrough and internal IO paths will never use high
118 * mem pages.
119 */
120static inline void
121iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
122{
123 struct scatterlist *sg;
124
125 if (segment->data != NULL || !segment->sg)
126 return;
127
128 sg = segment->sg;
129 BUG_ON(segment->sg_mapped);
130 BUG_ON(sg->length == 0);
131
132 /*
133 * If the page count is greater than one it is ok to send
134 * to the network layer's zero copy send path. If not we
135 * have to go the slow sendmsg path. We always map for the
136 * recv path.
137 */
138 if (page_count(sg_page(sg)) >= 1 && !recv)
139 return;
140
141 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
142 segment);
143 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
144 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
145}
146
147static inline void
148iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
149{
150 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
151
152 if (segment->sg_mapped) {
153 debug_tcp("iscsi_tcp_segment_unmap valid\n");
154 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
155 segment->sg_mapped = NULL;
156 segment->data = NULL;
157 }
158}
159
160/*
161 * Splice the digest buffer into the buffer
162 */
163static inline void
164iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
165{
166 segment->data = digest;
167 segment->digest_len = ISCSI_DIGEST_SIZE;
168 segment->total_size += ISCSI_DIGEST_SIZE;
169 segment->size = ISCSI_DIGEST_SIZE;
170 segment->copied = 0;
171 segment->sg = NULL;
172 segment->hash = NULL;
173}
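/*
 * Illustrative note (not part of the original source): after this
 * splice, a header segment that just finished its 48-byte BHS has
 * size == 4 and total_size == 52 (assuming the usual 4-byte CRC32C
 * digest), so the normal copy loop moves the digest exactly like
 * payload bytes before the done() callback runs again.
 */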
174
175/**
176 * iscsi_tcp_segment_done - check whether the segment is complete
177 * @segment: iscsi segment to check
178 * @recv: set to one if this is called from the recv path
179 * @copied: number of bytes copied
180 *
181 * Check if we're done receiving this segment. If the receive
182 * buffer is full but we expect more data, move on to the
183 * next entry in the scatterlist.
184 *
185 * If the amount of data we received isn't a multiple of 4,
186 * we will transparently receive the pad bytes, too.
187 *
188 * This function must be re-entrant.
189 */
190static inline int
191iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
192{
193 static unsigned char padbuf[ISCSI_PAD_LEN];
194 struct scatterlist sg;
195 unsigned int pad;
196
197 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
198 segment->size, recv ? "recv" : "xmit");
199 if (segment->hash && copied) {
200 /*
201 * If a segment is kmapped we must unmap it before sending
202 * to the crypto layer since that will try to kmap it again.
203 */
204 iscsi_tcp_segment_unmap(segment);
205
206 if (!segment->data) {
207 sg_init_table(&sg, 1);
208 sg_set_page(&sg, sg_page(segment->sg), copied,
209 segment->copied + segment->sg_offset +
210 segment->sg->offset);
211 } else
212 sg_init_one(&sg, segment->data + segment->copied,
213 copied);
214 crypto_hash_update(segment->hash, &sg, copied);
215 }
216
217 segment->copied += copied;
218 if (segment->copied < segment->size) {
219 iscsi_tcp_segment_map(segment, recv);
220 return 0;
221 }
222
223 segment->total_copied += segment->copied;
224 segment->copied = 0;
225 segment->size = 0;
226
227 /* Unmap the current scatterlist page, if there is one. */
228 iscsi_tcp_segment_unmap(segment);
229
230 /* Do we have more scatterlist entries? */
231 debug_tcp("total copied %u total size %u\n", segment->total_copied,
232 segment->total_size);
233 if (segment->total_copied < segment->total_size) {
234 /* Proceed to the next entry in the scatterlist. */
235 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
236 0);
237 iscsi_tcp_segment_map(segment, recv);
238 BUG_ON(segment->size == 0);
239 return 0;
240 }
241
242 /* Do we need to handle padding? */
243 pad = iscsi_padding(segment->total_copied);
244 if (pad != 0) {
245 debug_tcp("consume %d pad bytes\n", pad);
246 segment->total_size += pad;
247 segment->size = pad;
248 segment->data = padbuf;
249 return 0;
250 }
251
252 /*
253 * Set us up for transferring the data digest. The header digest
254 * is handled completely in the hdr done function.
255 */
256 if (segment->hash) {
257 crypto_hash_final(segment->hash, segment->digest);
258 iscsi_tcp_segment_splice_digest(segment,
259 recv ? segment->recv_digest : segment->digest);
260 return 0;
261 }
262
263 return 1;
264}
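/*
 * Illustrative example (numbers assumed, not from the source): iSCSI
 * pads data segments to a 4-byte boundary, so with total_copied == 13
 * the iscsi_padding() call above returns 3 and the loop transparently
 * consumes (or sends) those 3 pad bytes via padbuf before any data
 * digest is spliced in.
 */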
265
266/**
267 * iscsi_tcp_xmit_segment - transmit segment
268 * @tcp_conn: the iSCSI TCP connection
269 * @segment: the buffer to transmit
270 *
271 * This function transmits as much of the buffer as
272 * the network layer will accept, and returns the number of
273 * bytes transmitted.
274 *
275 * If CRC hashing is enabled, the function will compute the
276 * hash as it goes. When the entire segment has been transmitted,
277 * it will retrieve the hash value and send it as well.
278 */
279static int
280iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
281 struct iscsi_segment *segment)
282{
283 struct socket *sk = tcp_conn->sock;
284 unsigned int copied = 0;
285 int r = 0;
286
287 while (!iscsi_tcp_segment_done(segment, 0, r)) {
288 struct scatterlist *sg;
289 unsigned int offset, copy;
290 int flags = 0;
291
292 r = 0;
293 offset = segment->copied;
294 copy = segment->size - offset;
295
296 if (segment->total_copied + segment->size < segment->total_size)
297 flags |= MSG_MORE;
298
299 /* Use sendpage if we can; else fall back to sendmsg */
300 if (!segment->data) {
301 sg = segment->sg;
302 offset += segment->sg_offset + sg->offset;
303 r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
304 flags);
305 } else {
306 struct msghdr msg = { .msg_flags = flags };
307 struct kvec iov = {
308 .iov_base = segment->data + offset,
309 .iov_len = copy
310 };
311
312 r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
313 }
314
315 if (r < 0) {
316 iscsi_tcp_segment_unmap(segment);
317 if (copied || r == -EAGAIN)
318 break;
319 return r;
320 }
321 copied += r;
322 }
323 return copied;
324}
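/*
 * Design note (not part of the original comments): the sendpage branch
 * above is only taken for scatterlist-backed segments (segment->data
 * is NULL); when data digests are enabled, iscsi_conn_set_param()
 * swaps tcp_conn->sendpage for sock_no_sendpage, effectively disabling
 * the zero-copy path for those connections.
 */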
325
326/**
327 * iscsi_tcp_segment_recv - copy data to segment
328 * @tcp_conn: the iSCSI TCP connection
329 * @segment: the buffer to copy to
330 * @ptr: data pointer
331 * @len: amount of data available
332 *
333 * This function copies up to @len bytes to the
334 * given buffer, and returns the number of bytes
335 * consumed, which can actually be less than @len.
336 *
337 * If hash digest is enabled, the function will update the
338 * hash while copying.
339 * Combining these two operations doesn't buy us a lot (yet),
340 * but in the future we could implement combined copy+crc,
341 * just the way we do for network layer checksums.
342 */
343static int
344iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
345 struct iscsi_segment *segment, const void *ptr,
346 unsigned int len)
347{
348 unsigned int copy = 0, copied = 0;
349
350 while (!iscsi_tcp_segment_done(segment, 1, copy)) {
351 if (copied == len) {
352 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
353 len);
354 break;
355 }
356
357 copy = min(len - copied, segment->size - segment->copied);
358 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
359 memcpy(segment->data + segment->copied, ptr + copied, copy);
360 copied += copy;
361 }
362 return copied;
363}
364
365static inline void
366iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
367 unsigned char digest[ISCSI_DIGEST_SIZE])
368{
369 struct scatterlist sg;
370
371 sg_init_one(&sg, hdr, hdrlen);
372 crypto_hash_digest(hash, &sg, hdrlen, digest);
373}
374
375static inline int
376iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
377 struct iscsi_segment *segment)
378{
379 if (!segment->digest_len)
380 return 1;
381
382 if (memcmp(segment->recv_digest, segment->digest,
383 segment->digest_len)) {
384 debug_scsi("digest mismatch\n");
385 return 0;
386 }
387
388 return 1;
389}
390
391/*
392 * Helper function to set up segment buffer
393 */
394static inline void
395__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
396 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
397{
398 memset(segment, 0, sizeof(*segment));
399 segment->total_size = size;
400 segment->done = done;
401
402 if (hash) {
403 segment->hash = hash;
404 crypto_hash_init(hash);
405 }
406}
407
408static inline void
409iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
410 size_t size, iscsi_segment_done_fn_t *done,
411 struct hash_desc *hash)
412{
413 __iscsi_segment_init(segment, size, done, hash);
414 segment->data = data;
415 segment->size = size;
416}
417
418static inline int
419iscsi_segment_seek_sg(struct iscsi_segment *segment,
420 struct scatterlist *sg_list, unsigned int sg_count,
421 unsigned int offset, size_t size,
422 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
423{
424 struct scatterlist *sg;
425 unsigned int i;
426
427 debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
428 offset, size);
429 __iscsi_segment_init(segment, size, done, hash);
430 for_each_sg(sg_list, sg, sg_count, i) {
431 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
432 sg->offset);
433 if (offset < sg->length) {
434 iscsi_tcp_segment_init_sg(segment, sg, offset);
435 return 0;
436 }
437 offset -= sg->length;
438 }
439
440 return ISCSI_ERR_DATA_OFFSET;
441}
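/*
 * Usage sketch (illustrative only): iscsi_tcp_hdr_dissect() calls this
 * with the command's scatterlist and tcp_ctask->data_offset so that a
 * Data-In PDU landing mid-transfer is copied straight into the right
 * sg entry; an offset past the end of the list is reported as
 * ISCSI_ERR_DATA_OFFSET.
 */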
442
443/**
444 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
445 * @tcp_conn: iscsi connection to prep for
446 *
447 * This function always passes NULL for the hash argument, because when this
448 * function is called we do not yet know the final size of the header and want
449 * to delay the digest processing until we know that.
450 */
451static void
452iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
453{
454 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
455 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
456 iscsi_segment_init_linear(&tcp_conn->in.segment,
457 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
458 iscsi_tcp_hdr_recv_done, NULL);
459}
460
461/*
462 * Handle incoming reply to any other type of command
463 */
464static int
465iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
466 struct iscsi_segment *segment)
467{
468 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
469 int rc = 0;
470
471 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
472 return ISCSI_ERR_DATA_DGST;
473
474 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
475 conn->data, tcp_conn->in.datalen);
476 if (rc)
477 return rc;
478
479 iscsi_tcp_hdr_recv_prep(tcp_conn);
480 return 0;
481}
482
483static void
484iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
485{
486 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
487 struct hash_desc *rx_hash = NULL;
488
489 if (conn->datadgst_en)
490 rx_hash = &tcp_conn->rx_hash;
491
492 iscsi_segment_init_linear(&tcp_conn->in.segment,
493 conn->data, tcp_conn->in.datalen,
494 iscsi_tcp_data_recv_done, rx_hash);
495}
496
497/*
498 * must be called with session lock
499 */
500static void
501iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
502{
503 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
504 struct iscsi_r2t_info *r2t;
505
506 /* flush ctask's r2t queues */
507 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
508 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
509 sizeof(void*));
510 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
511 }
512
513 r2t = tcp_ctask->r2t;
514 if (r2t != NULL) {
515 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
516 sizeof(void*));
517 tcp_ctask->r2t = NULL;
518 }
519}
520
521/**
522 * iscsi_data_rsp - SCSI Data-In Response processing
523 * @conn: iscsi connection
524 * @ctask: scsi command task
525 **/
526static int
527iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
528{
529 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
530 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
531 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
532 struct iscsi_session *session = conn->session;
533 struct scsi_cmnd *sc = ctask->sc;
534 int datasn = be32_to_cpu(rhdr->datasn);
535 unsigned total_in_length = scsi_in(sc)->length;
536
537 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
538 if (tcp_conn->in.datalen == 0)
539 return 0;
540
541 if (tcp_ctask->exp_datasn != datasn) {
542 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
543 __FUNCTION__, tcp_ctask->exp_datasn, datasn);
544 return ISCSI_ERR_DATASN;
545 }
546
547 tcp_ctask->exp_datasn++;
548
549 tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
550 if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
551 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
552 __FUNCTION__, tcp_ctask->data_offset,
553 tcp_conn->in.datalen, total_in_length);
554 return ISCSI_ERR_DATA_OFFSET;
555 }
556
557 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
558 sc->result = (DID_OK << 16) | rhdr->cmd_status;
559 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
560 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
561 ISCSI_FLAG_DATA_OVERFLOW)) {
562 int res_count = be32_to_cpu(rhdr->residual_count);
563
564 if (res_count > 0 &&
565 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
566 res_count <= total_in_length))
567 scsi_in(sc)->resid = res_count;
568 else
569 sc->result = (DID_BAD_TARGET << 16) |
570 rhdr->cmd_status;
571 }
572 }
573
574 conn->datain_pdus_cnt++;
575 return 0;
576}
577
578/**
579 * iscsi_solicit_data_init - initialize first Data-Out
580 * @conn: iscsi connection
581 * @ctask: scsi command task
582 * @r2t: R2T info
583 *
584 * Notes:
585 * Initialize the first Data-Out within this R2T sequence and find
586 * the proper data_offset within this SCSI command.
587 *
588 * This function is called with connection lock taken.
589 **/
590static void
591iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
592 struct iscsi_r2t_info *r2t)
593{
594 struct iscsi_data *hdr;
595
596 hdr = &r2t->dtask.hdr;
597 memset(hdr, 0, sizeof(struct iscsi_data));
598 hdr->ttt = r2t->ttt;
599 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
600 r2t->solicit_datasn++;
601 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
602 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
603 hdr->itt = ctask->hdr->itt;
604 hdr->exp_statsn = r2t->exp_statsn;
605 hdr->offset = cpu_to_be32(r2t->data_offset);
606 if (r2t->data_length > conn->max_xmit_dlength) {
607 hton24(hdr->dlength, conn->max_xmit_dlength);
608 r2t->data_count = conn->max_xmit_dlength;
609 hdr->flags = 0;
610 } else {
611 hton24(hdr->dlength, r2t->data_length);
612 r2t->data_count = r2t->data_length;
613 hdr->flags = ISCSI_FLAG_CMD_FINAL;
614 }
615 conn->dataout_pdus_cnt++;
616
617 r2t->sent = 0;
618}
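/*
 * Illustrative example (numbers assumed): for an R2T requesting 16k
 * with conn->max_xmit_dlength == 8k, the Data-Out prepared here
 * carries the first 8k and leaves hdr->flags clear;
 * iscsi_solicit_data_cont() then builds the remaining 8k Data-Out and
 * sets ISCSI_FLAG_CMD_FINAL on it.
 */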
619
620/**
621 * iscsi_r2t_rsp - iSCSI R2T Response processing
622 * @conn: iscsi connection
623 * @ctask: scsi command task
624 **/
625static int
626iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
627{
628 struct iscsi_r2t_info *r2t;
629 struct iscsi_session *session = conn->session;
630 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
631 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
632 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
633 int r2tsn = be32_to_cpu(rhdr->r2tsn);
634 int rc;
635
636 if (tcp_conn->in.datalen) {
637 iscsi_conn_printk(KERN_ERR, conn,
638 "invalid R2t with datalen %d\n",
639 tcp_conn->in.datalen);
640 return ISCSI_ERR_DATALEN;
641 }
642
643 if (tcp_ctask->exp_datasn != r2tsn){
644 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
645 __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
646 return ISCSI_ERR_R2TSN;
647 }
648
649 /* fill-in new R2T associated with the task */
650 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
651
652 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
653 iscsi_conn_printk(KERN_INFO, conn,
654 "dropping R2T itt %d in recovery.\n",
655 ctask->itt);
656 return 0;
657 }
658
659 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
660 BUG_ON(!rc);
661
662 r2t->exp_statsn = rhdr->statsn;
663 r2t->data_length = be32_to_cpu(rhdr->data_length);
664 if (r2t->data_length == 0) {
665 iscsi_conn_printk(KERN_ERR, conn,
666 "invalid R2T with zero data len\n");
667 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
668 sizeof(void*));
669 return ISCSI_ERR_DATALEN;
670 }
671
672 if (r2t->data_length > session->max_burst)
673 debug_scsi("invalid R2T with data len %u and max burst %u. "
674 "Attempting to execute request.\n",
675 r2t->data_length, session->max_burst);
676
677 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
678 if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
679 iscsi_conn_printk(KERN_ERR, conn,
680 "invalid R2T with data len %u at offset %u "
681 "and total length %d\n", r2t->data_length,
682 r2t->data_offset, scsi_out(ctask->sc)->length);
683 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
684 sizeof(void*));
685 return ISCSI_ERR_DATALEN;
686 }
687
688 r2t->ttt = rhdr->ttt; /* no flip */
689 r2t->solicit_datasn = 0;
690
691 iscsi_solicit_data_init(conn, ctask, r2t);
692
693 tcp_ctask->exp_datasn = r2tsn + 1;
694 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
695 conn->r2t_pdus_cnt++;
696
697 iscsi_requeue_ctask(ctask);
698 return 0;
699}
700
701/*
702 * Handle incoming reply to DataIn command
703 */
704static int
705iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
706 struct iscsi_segment *segment)
707{
708 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
709 struct iscsi_hdr *hdr = tcp_conn->in.hdr;
710 int rc;
711
712 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
713 return ISCSI_ERR_DATA_DGST;
714
715 /* check for non-exceptional status */
716 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
717 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
718 if (rc)
719 return rc;
720 }
721
722 iscsi_tcp_hdr_recv_prep(tcp_conn);
723 return 0;
724}
725
726/**
727 * iscsi_tcp_hdr_dissect - process PDU header
728 * @conn: iSCSI connection
729 * @hdr: PDU header
730 *
731 * This function analyzes the header of the PDU received,
732 * and performs several sanity checks. If the PDU is accompanied
733 * by data, the receive buffer is set up to copy the incoming data
734 * to the correct location.
735 */
736static int
737iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
738{
739 int rc = 0, opcode, ahslen;
740 struct iscsi_session *session = conn->session;
741 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
742 struct iscsi_cmd_task *ctask;
743
744 /* verify PDU length */
745 tcp_conn->in.datalen = ntoh24(hdr->dlength);
746 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
747 iscsi_conn_printk(KERN_ERR, conn,
748 "iscsi_tcp: datalen %d > %d\n",
749 tcp_conn->in.datalen, conn->max_recv_dlength);
750 return ISCSI_ERR_DATALEN;
751 }
752
753 /* Additional header segments. So far, we don't
754 * process additional headers.
755 */
756 ahslen = hdr->hlength << 2;
757
758 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
759 /* verify itt (itt encoding: age+cid+itt) */
760 rc = iscsi_verify_itt(conn, hdr->itt);
761 if (rc)
762 return rc;
763
764 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
765 opcode, ahslen, tcp_conn->in.datalen);
766
767 switch(opcode) {
768 case ISCSI_OP_SCSI_DATA_IN:
769 ctask = iscsi_itt_to_ctask(conn, hdr->itt);
770 if (!ctask)
771 return ISCSI_ERR_BAD_ITT;
772
773 spin_lock(&conn->session->lock);
774 rc = iscsi_data_rsp(conn, ctask);
775 spin_unlock(&conn->session->lock);
776 if (rc)
777 return rc;
778 if (tcp_conn->in.datalen) {
779 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
780 struct hash_desc *rx_hash = NULL;
781 struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
782
783 /*
784 * Setup copy of Data-In into the Scsi_Cmnd
785 * Scatterlist case:
786 * We set up the iscsi_segment to point to the next
787 * scatterlist entry to copy to. As we go along,
788 * we move on to the next scatterlist entry and
789 * update the digest per-entry.
790 */
791 if (conn->datadgst_en)
792 rx_hash = &tcp_conn->rx_hash;
793
794 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
795 "datalen=%d)\n", tcp_conn,
796 tcp_ctask->data_offset,
797 tcp_conn->in.datalen);
798 return iscsi_segment_seek_sg(&tcp_conn->in.segment,
799 sdb->table.sgl,
800 sdb->table.nents,
801 tcp_ctask->data_offset,
802 tcp_conn->in.datalen,
803 iscsi_tcp_process_data_in,
804 rx_hash);
805 }
806 /* fall through */
807 case ISCSI_OP_SCSI_CMD_RSP:
808 if (tcp_conn->in.datalen) {
809 iscsi_tcp_data_recv_prep(tcp_conn);
810 return 0;
811 }
812 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
813 break;
814 case ISCSI_OP_R2T:
815 ctask = iscsi_itt_to_ctask(conn, hdr->itt);
816 if (!ctask)
817 return ISCSI_ERR_BAD_ITT;
818
819 if (ahslen)
820 rc = ISCSI_ERR_AHSLEN;
821 else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
822 spin_lock(&session->lock);
823 rc = iscsi_r2t_rsp(conn, ctask);
824 spin_unlock(&session->lock);
825 } else
826 rc = ISCSI_ERR_PROTO;
827 break;
828 case ISCSI_OP_LOGIN_RSP:
829 case ISCSI_OP_TEXT_RSP:
830 case ISCSI_OP_REJECT:
831 case ISCSI_OP_ASYNC_EVENT:
832 /*
833 * It is possible that we could get a PDU with a buffer larger
834 * than 8K, but there are no targets that currently do this.
835 * For now we fail until we find a vendor that needs it
836 */
837 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
838 iscsi_conn_printk(KERN_ERR, conn,
839 "iscsi_tcp: received buffer of "
840 "len %u but conn buffer is only %u "
841 "(opcode %0x)\n",
842 tcp_conn->in.datalen,
843 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
844 rc = ISCSI_ERR_PROTO;
845 break;
846 }
847
848 /* If there's data coming in with the response,
849 * receive it to the connection's buffer.
850 */
851 if (tcp_conn->in.datalen) {
852 iscsi_tcp_data_recv_prep(tcp_conn);
853 return 0;
854 }
855 /* fall through */
856 case ISCSI_OP_LOGOUT_RSP:
857 case ISCSI_OP_NOOP_IN:
858 case ISCSI_OP_SCSI_TMFUNC_RSP:
859 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
860 break;
861 default:
862 rc = ISCSI_ERR_BAD_OPCODE;
863 break;
864 }
865
866 if (rc == 0) {
867 /* Anything that comes with data should have
868 * been handled above. */
869 if (tcp_conn->in.datalen)
870 return ISCSI_ERR_PROTO;
871 iscsi_tcp_hdr_recv_prep(tcp_conn);
872 }
873
874 return rc;
875}
876
877/**
878 * iscsi_tcp_hdr_recv_done - process PDU header
879 *
880 * This is the callback invoked when the PDU header has
881 * been received. If the header is followed by additional
882 * header segments, we go back for more data.
883 */
884static int
885iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
886 struct iscsi_segment *segment)
887{
888 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
889 struct iscsi_hdr *hdr;
890
891 /* Check if there are additional header segments
892 * *prior* to computing the digest, because we
893 * may need to go back to the caller for more.
894 */
895 hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
896 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
897 /* Bump the header length - the caller will
898 * just loop around and get the AHS for us, and
899 * call again. */
900 unsigned int ahslen = hdr->hlength << 2;
901
902 /* Make sure we don't overflow */
903 if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
904 return ISCSI_ERR_AHSLEN;
905
906 segment->total_size += ahslen;
907 segment->size += ahslen;
908 return 0;
909 }
910
911 /* We're done processing the header. See if we're doing
912 * header digests; if so, set up the recv_digest buffer
913 * and go back for more. */
914 if (conn->hdrdgst_en) {
915 if (segment->digest_len == 0) {
916 iscsi_tcp_segment_splice_digest(segment,
917 segment->recv_digest);
918 return 0;
919 }
920 iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
921 segment->total_copied - ISCSI_DIGEST_SIZE,
922 segment->digest);
923
924 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
925 return ISCSI_ERR_HDR_DGST;
926 }
927
928 tcp_conn->in.hdr = hdr;
929 return iscsi_tcp_hdr_dissect(conn, hdr);
930}
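/*
 * Illustrative note (not part of the original comments): this callback
 * is written to be re-entered - the first branch grows the segment to
 * pull in any AHS, the hdrdgst_en branch splices in the 4-byte header
 * digest and asks for more, and only a complete, verified header is
 * handed on to iscsi_tcp_hdr_dissect().
 */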
931
932/**
933 * iscsi_tcp_recv - TCP receive in sendfile fashion
934 * @rd_desc: read descriptor
935 * @skb: socket buffer
936 * @offset: offset in skb
937 * @len: skb->len - offset
938 **/
939static int
940iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
941 unsigned int offset, size_t len)
942{
943 struct iscsi_conn *conn = rd_desc->arg.data;
944 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
945 struct iscsi_segment *segment = &tcp_conn->in.segment;
946 struct skb_seq_state seq;
947 unsigned int consumed = 0;
948 int rc = 0;
949
950 debug_tcp("in %d bytes\n", skb->len - offset);
951
952 if (unlikely(conn->suspend_rx)) {
953 debug_tcp("conn %d Rx suspended!\n", conn->id);
954 return 0;
955 }
956
957 skb_prepare_seq_read(skb, offset, skb->len, &seq);
958 while (1) {
959 unsigned int avail;
960 const u8 *ptr;
961
962 avail = skb_seq_read(consumed, &ptr, &seq);
963 if (avail == 0) {
964 debug_tcp("no more data avail. Consumed %d\n",
965 consumed);
966 break;
967 }
968 BUG_ON(segment->copied >= segment->size);
969
970 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
971 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
972 BUG_ON(rc == 0);
973 consumed += rc;
974
975 if (segment->total_copied >= segment->total_size) {
976 debug_tcp("segment done\n");
977 rc = segment->done(tcp_conn, segment);
978 if (rc != 0) {
979 skb_abort_seq_read(&seq);
980 goto error;
981 }
982
983 /* The done() function sets up the
984 * next segment. */
985 }
986 }
987 skb_abort_seq_read(&seq);
988 conn->rxdata_octets += consumed;
989 return consumed;
990
991error:
992 debug_tcp("Error receiving PDU, errno=%d\n", rc);
993 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
994 return 0;
995}
996
997static void
998iscsi_tcp_data_ready(struct sock *sk, int flag)
999{
1000 struct iscsi_conn *conn = sk->sk_user_data;
1001 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1002 read_descriptor_t rd_desc;
1003
1004 read_lock(&sk->sk_callback_lock);
1005
1006 /*
1007 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
1008 * We set count to 1 because we want the network layer to
1009 * hand us all the skbs that are available. iscsi_tcp_recv
1010 * handles pdus that cross buffers or pdus that still need data.
1011 */
1012 rd_desc.arg.data = conn;
1013 rd_desc.count = 1;
1014 tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
1015
1016 read_unlock(&sk->sk_callback_lock);
1017
1018 /* If we had to (atomically) map a highmem page,
1019 * unmap it now. */
1020 iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
1021}
1022
1023static void
1024iscsi_tcp_state_change(struct sock *sk)
1025{
1026 struct iscsi_tcp_conn *tcp_conn;
1027 struct iscsi_conn *conn;
1028 struct iscsi_session *session;
1029 void (*old_state_change)(struct sock *);
1030
1031 read_lock(&sk->sk_callback_lock);
1032
1033 conn = (struct iscsi_conn*)sk->sk_user_data;
1034 session = conn->session;
1035
1036 if ((sk->sk_state == TCP_CLOSE_WAIT ||
1037 sk->sk_state == TCP_CLOSE) &&
1038 !atomic_read(&sk->sk_rmem_alloc)) {
1039 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1040 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1041 }
1042
1043 tcp_conn = conn->dd_data;
1044 old_state_change = tcp_conn->old_state_change;
1045
1046 read_unlock(&sk->sk_callback_lock);
1047
1048 old_state_change(sk);
1049}
1050
1051/**
1052 * iscsi_write_space - Called when more output buffer space is available
1053 * @sk: socket space is available for
1054 **/
1055static void
1056iscsi_write_space(struct sock *sk)
1057{
1058 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1059 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1060
1061 tcp_conn->old_write_space(sk);
1062 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1063 scsi_queue_work(conn->session->host, &conn->xmitwork);
1064}
1065
1066static void
1067iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1068{
1069 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1070 struct sock *sk = tcp_conn->sock->sk;
1071
1072 /* assign new callbacks */
1073 write_lock_bh(&sk->sk_callback_lock);
1074 sk->sk_user_data = conn;
1075 tcp_conn->old_data_ready = sk->sk_data_ready;
1076 tcp_conn->old_state_change = sk->sk_state_change;
1077 tcp_conn->old_write_space = sk->sk_write_space;
1078 sk->sk_data_ready = iscsi_tcp_data_ready;
1079 sk->sk_state_change = iscsi_tcp_state_change;
1080 sk->sk_write_space = iscsi_write_space;
1081 write_unlock_bh(&sk->sk_callback_lock);
1082}
1083
1084static void
1085iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
1086{
1087 struct sock *sk = tcp_conn->sock->sk;
1088
1089 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1090 write_lock_bh(&sk->sk_callback_lock);
1091 sk->sk_user_data = NULL;
1092 sk->sk_data_ready = tcp_conn->old_data_ready;
1093 sk->sk_state_change = tcp_conn->old_state_change;
1094 sk->sk_write_space = tcp_conn->old_write_space;
1095 sk->sk_no_check = 0;
1096 write_unlock_bh(&sk->sk_callback_lock);
1097}
1098
1099/**
1100 * iscsi_xmit - TCP transmit
1101 **/
1102static int
1103iscsi_xmit(struct iscsi_conn *conn)
1104{
1105 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1106 struct iscsi_segment *segment = &tcp_conn->out.segment;
1107 unsigned int consumed = 0;
1108 int rc = 0;
1109
1110 while (1) {
1111 rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
1112 if (rc < 0)
1113 goto error;
1114 if (rc == 0)
1115 break;
1116
1117 consumed += rc;
1118
1119 if (segment->total_copied >= segment->total_size) {
1120 if (segment->done != NULL) {
1121 rc = segment->done(tcp_conn, segment);
1122 if (rc < 0)
1123 goto error;
1124 }
1125 }
1126 }
1127
1128 debug_tcp("xmit %d bytes\n", consumed);
1129
1130 conn->txdata_octets += consumed;
1131 return consumed;
1132
1133error:
1134 /* Transmit error. We could initiate error recovery
1135 * here. */
1136 debug_tcp("Error sending PDU, errno=%d\n", rc);
1137 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1138 return rc;
1139}
1140
1141/**
1142 * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
1143 */
1144static inline int
1145iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
1146{
1147 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1148 struct iscsi_segment *segment = &tcp_conn->out.segment;
1149
1150 return segment->total_copied - segment->total_size;
1151}
1152
1153static inline int
1154iscsi_tcp_flush(struct iscsi_conn *conn)
1155{
1156 int rc;
1157
1158 while (iscsi_tcp_xmit_qlen(conn)) {
1159 rc = iscsi_xmit(conn);
1160 if (rc == 0)
1161 return -EAGAIN;
1162 if (rc < 0)
1163 return rc;
1164 }
1165
1166 return 0;
1167}
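/*
 * Behavioural sketch (assumption, not from the original comments): a
 * zero return from iscsi_xmit() here means the socket buffer filled
 * up, so -EAGAIN is handed back and the transfer resumes when
 * iscsi_write_space() re-queues conn->xmitwork.
 */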
1168
1169/*
1170 * This is called when we're done sending the header.
1171 * Simply copy the data_segment to the send segment, and return.
1172 */
1173static int
1174iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
1175 struct iscsi_segment *segment)
1176{
1177 tcp_conn->out.segment = tcp_conn->out.data_segment;
1178 debug_tcp("Header done. Next segment size %u total_size %u\n",
1179 tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
1180 return 0;
1181}
1182
1183static void
1184iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1185{
1186 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1187
1188 debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
1189 conn->hdrdgst_en? ", digest enabled" : "");
1190
1191 /* Clear the data segment - needs to be filled in by the
1192 * caller using iscsi_tcp_send_data_prep() */
1193 memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
1194
1195 /* If header digest is enabled, compute the CRC and
1196 * place the digest into the same buffer. We make
1197 * sure that both iscsi_tcp_ctask and mtask have
1198 * sufficient room.
1199 */
1200 if (conn->hdrdgst_en) {
1201 iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
1202 hdr + hdrlen);
1203 hdrlen += ISCSI_DIGEST_SIZE;
1204 }
1205
1206 /* Remember header pointer for later, when we need
1207 * to decide whether there's a payload to go along
1208 * with the header. */
1209 tcp_conn->out.hdr = hdr;
1210
1211 iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
1212 iscsi_tcp_send_hdr_done, NULL);
1213}
1214
1215/*
1216 * Prepare the send buffer for the payload data.
1217 * Padding and checksumming will all be taken care
1218 * of by the iscsi_segment routines.
1219 */
1220static int
1221iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1222 unsigned int count, unsigned int offset,
1223 unsigned int len)
1224{
1225 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1226 struct hash_desc *tx_hash = NULL;
1227 unsigned int hdr_spec_len;
1228
1229 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
1230 tcp_conn, offset, len,
1231 conn->datadgst_en? ", digest enabled" : "");
1232
1233 /* Make sure the datalen matches what the caller
1234 said he would send. */
1235 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
1236 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1237
1238 if (conn->datadgst_en)
1239 tx_hash = &tcp_conn->tx_hash;
1240
1241 return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
1242 sg, count, offset, len,
1243 NULL, tx_hash);
1244}
1245
1246static void
1247iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1248 size_t len)
1249{
1250 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1251 struct hash_desc *tx_hash = NULL;
1252 unsigned int hdr_spec_len;
1253
1254 debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
1255 conn->datadgst_en? ", digest enabled" : "");
1256
1257 /* Make sure the datalen matches what the caller
1258 said he would send. */
1259 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
1260 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1261
1262 if (conn->datadgst_en)
1263 tx_hash = &tcp_conn->tx_hash;
1264
1265 iscsi_segment_init_linear(&tcp_conn->out.data_segment,
1266 data, len, NULL, tx_hash);
1267}
1268
1269/**
1270 * iscsi_solicit_data_cont - initialize next Data-Out
1271 * @conn: iscsi connection
1272 * @ctask: scsi command task
1273 * @r2t: R2T info
1274 * @left: bytes left to transfer
1275 *
1276 * Notes:
1277 * Initialize next Data-Out within this R2T sequence and continue
1278 * to process the next Scatter-Gather element (if any) of this SCSI command.
1279 *
1280 * Called under connection lock.
1281 **/
1282static int
1283iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1284 struct iscsi_r2t_info *r2t)
1285{
1286 struct iscsi_data *hdr;
1287 int new_offset, left;
1288
1289 BUG_ON(r2t->data_length - r2t->sent < 0);
1290 left = r2t->data_length - r2t->sent;
1291 if (left == 0)
1292 return 0;
1293
1294 hdr = &r2t->dtask.hdr;
1295 memset(hdr, 0, sizeof(struct iscsi_data));
1296 hdr->ttt = r2t->ttt;
1297 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1298 r2t->solicit_datasn++;
1299 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1300 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
1301 hdr->itt = ctask->hdr->itt;
1302 hdr->exp_statsn = r2t->exp_statsn;
1303 new_offset = r2t->data_offset + r2t->sent;
1304 hdr->offset = cpu_to_be32(new_offset);
1305 if (left > conn->max_xmit_dlength) {
1306 hton24(hdr->dlength, conn->max_xmit_dlength);
1307 r2t->data_count = conn->max_xmit_dlength;
1308 } else {
1309 hton24(hdr->dlength, left);
1310 r2t->data_count = left;
1311 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1312 }
1313
1314 conn->dataout_pdus_cnt++;
1315 return 1;
1316}
1317
1318/**
1319 * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1320 * @ctask: scsi command task
1323 **/
1324static int
1325iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
1326{
1327 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1328 struct iscsi_conn *conn = ctask->conn;
1329 struct scsi_cmnd *sc = ctask->sc;
1330 int err;
1331
1332 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
1333 tcp_ctask->sent = 0;
1334 tcp_ctask->exp_datasn = 0;
1335
1336 /* Prepare PDU, optionally w/ immediate data */
1337 debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
1338 conn->id, ctask->itt, ctask->imm_count,
1339 ctask->unsol_count);
1340 iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
1341
1342 if (!ctask->imm_count)
1343 return 0;
1344
1345 /* If we have immediate data, attach a payload */
1346 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
1347 scsi_out(sc)->table.nents,
1348 0, ctask->imm_count);
1349 if (err)
1350 return err;
1351 tcp_ctask->sent += ctask->imm_count;
1352 ctask->imm_count = 0;
1353 return 0;
1354}
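/*
 * Illustrative example (numbers assumed): with ImmediateData enabled
 * and ctask->imm_count == 4k, the command PDU prepared above already
 * carries the first 4k of the write payload from the command's
 * scatterlist; any unsolicited or R2T Data-Outs for the remainder are
 * built later in iscsi_tcp_ctask_xmit().
 */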
1355
1356/**
1357 * iscsi_tcp_mtask_xmit - xmit management (immediate) task
1358 * @conn: iscsi connection
1359 * @mtask: task management task
1360 *
1361 * Notes:
1362 * The function can return -EAGAIN, in which case the caller must
1363 * call it again later or recover. A '0' return code means the
1364 * xmit succeeded.
1365 **/
1366static int
1367iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1368{
1369 int rc;
1370
1371 /* Flush any pending data first. */
1372 rc = iscsi_tcp_flush(conn);
1373 if (rc < 0)
1374 return rc;
1375
1376 if (mtask->hdr->itt == RESERVED_ITT) {
1377 struct iscsi_session *session = conn->session;
1378
1379 spin_lock_bh(&session->lock);
1380 iscsi_free_mgmt_task(conn, mtask);
1381 spin_unlock_bh(&session->lock);
1382 }
1383
1384 return 0;
1385}
1386
1387/*
1388 * iscsi_tcp_ctask_xmit - xmit normal PDU task
1389 * @conn: iscsi connection
1390 * @ctask: iscsi command task
1391 *
1392 * We're expected to return 0 when everything was transmitted successfully,
1393 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1394 * of error.
1395 */
1396static int
1397iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1398{
1399 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1400 struct scsi_cmnd *sc = ctask->sc;
1401 struct scsi_data_buffer *sdb = scsi_out(sc);
1402 int rc = 0;
1403
1404flush:
1405 /* Flush any pending data first. */
1406 rc = iscsi_tcp_flush(conn);
1407 if (rc < 0)
1408 return rc;
1409
1410 /* Are we done already? */
1411 if (sc->sc_data_direction != DMA_TO_DEVICE)
1412 return 0;
1413
1414 if (ctask->unsol_count != 0) {
1415 struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
1416
1417 /* Prepare a header for the unsolicited PDU.
1418 * The amount of data we want to send will be
1419 * in ctask->data_count.
1420 * FIXME: return the data count instead.
1421 */
1422 iscsi_prep_unsolicit_data_pdu(ctask, hdr);
1423
1424 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1425 ctask->itt, tcp_ctask->sent, ctask->data_count);
1426
1427 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1428 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1429 sdb->table.nents, tcp_ctask->sent,
1430 ctask->data_count);
1431 if (rc)
1432 goto fail;
1433 tcp_ctask->sent += ctask->data_count;
1434 ctask->unsol_count -= ctask->data_count;
1435 goto flush;
1436 } else {
1437 struct iscsi_session *session = conn->session;
1438 struct iscsi_r2t_info *r2t;
1439
1440 /* All unsolicited PDUs sent. Check for solicited PDUs.
1441 */
1442 spin_lock_bh(&session->lock);
1443 r2t = tcp_ctask->r2t;
1444 if (r2t != NULL) {
1445 /* Continue with this R2T? */
1446 if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
1447 debug_scsi(" done with r2t %p\n", r2t);
1448
1449 __kfifo_put(tcp_ctask->r2tpool.queue,
1450 (void*)&r2t, sizeof(void*));
1451 tcp_ctask->r2t = r2t = NULL;
1452 }
1453 }
1454
1455 if (r2t == NULL) {
1456 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
1457 sizeof(void*));
1458 r2t = tcp_ctask->r2t;
1459 }
1460 spin_unlock_bh(&session->lock);
1461
1462 /* Waiting for more R2Ts to arrive. */
1463 if (r2t == NULL) {
1464 debug_tcp("no R2Ts yet\n");
1465 return 0;
1466 }
1467
1468 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1469 r2t, r2t->solicit_datasn - 1, ctask->itt,
1470 r2t->data_offset + r2t->sent, r2t->data_count);
1471
1472 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
1473 sizeof(struct iscsi_hdr));
1474
1475 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1476 sdb->table.nents,
1477 r2t->data_offset + r2t->sent,
1478 r2t->data_count);
1479 if (rc)
1480 goto fail;
1481 tcp_ctask->sent += r2t->data_count;
1482 r2t->sent += r2t->data_count;
1483 goto flush;
1484 }
1485 return 0;
1486fail:
1487 iscsi_conn_failure(conn, rc);
1488 return -EIO;
1489}
1490
1491static struct iscsi_cls_conn *
1492iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1493{
1494 struct iscsi_conn *conn;
1495 struct iscsi_cls_conn *cls_conn;
1496 struct iscsi_tcp_conn *tcp_conn;
1497
1498 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
1499 if (!cls_conn)
1500 return NULL;
1501 conn = cls_conn->dd_data;
1502 /*
1503 * due to strange issues with iser these are not set
1504 * in iscsi_conn_setup
1505 */
1506 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1507
1508 tcp_conn = conn->dd_data;
1509 tcp_conn->iscsi_conn = conn;
1510
1511 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1512 CRYPTO_ALG_ASYNC);
1513 tcp_conn->tx_hash.flags = 0;
1514 if (IS_ERR(tcp_conn->tx_hash.tfm))
1515 goto free_conn;
1516
1517 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1518 CRYPTO_ALG_ASYNC);
1519 tcp_conn->rx_hash.flags = 0;
1520 if (IS_ERR(tcp_conn->rx_hash.tfm))
1521 goto free_tx_tfm;
1522
1523 return cls_conn;
1524
1525free_tx_tfm:
1526 crypto_free_hash(tcp_conn->tx_hash.tfm);
1527free_conn:
1528 iscsi_conn_printk(KERN_ERR, conn,
1529 "Could not create connection due to crc32c "
1530 "loading error. Make sure the crc32c "
1531 "module is built as a module or into the "
1532 "kernel\n");
1533 iscsi_conn_teardown(cls_conn);
1534 return NULL;
1535}
1536
1537static void
1538iscsi_tcp_release_conn(struct iscsi_conn *conn)
1539{
1540 struct iscsi_session *session = conn->session;
1541 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1542 struct socket *sock = tcp_conn->sock;
1543
1544 if (!sock)
1545 return;
1546
1547 sock_hold(sock->sk);
1548 iscsi_conn_restore_callbacks(tcp_conn);
1549 sock_put(sock->sk);
1550
1551 spin_lock_bh(&session->lock);
1552 tcp_conn->sock = NULL;
1553 conn->recv_lock = NULL;
1554 spin_unlock_bh(&session->lock);
1555 sockfd_put(sock);
1556}
1557
1558static void
1559iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1560{
1561 struct iscsi_conn *conn = cls_conn->dd_data;
1562 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1563
1564 iscsi_tcp_release_conn(conn);
1565
1566 if (tcp_conn->tx_hash.tfm)
1567 crypto_free_hash(tcp_conn->tx_hash.tfm);
1568 if (tcp_conn->rx_hash.tfm)
1569 crypto_free_hash(tcp_conn->rx_hash.tfm);
1570
1571 iscsi_conn_teardown(cls_conn);
1572}
1573
1574static void
1575iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1576{
1577 struct iscsi_conn *conn = cls_conn->dd_data;
1578
1579 iscsi_conn_stop(cls_conn, flag);
1580 iscsi_tcp_release_conn(conn);
1581}
1582
1583static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
1584 char *buf, int *port,
1585 int (*getname)(struct socket *, struct sockaddr *,
1586 int *addrlen))
1587{
1588 struct sockaddr_storage *addr;
1589 struct sockaddr_in6 *sin6;
1590 struct sockaddr_in *sin;
1591 int rc = 0, len;
1592
1593 addr = kmalloc(sizeof(*addr), GFP_KERNEL);
1594 if (!addr)
1595 return -ENOMEM;
1596
1597 if (getname(sock, (struct sockaddr *) addr, &len)) {
1598 rc = -ENODEV;
1599 goto free_addr;
1600 }
1601
1602 switch (addr->ss_family) {
1603 case AF_INET:
1604 sin = (struct sockaddr_in *)addr;
1605 spin_lock_bh(&conn->session->lock);
1606 sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
1607 *port = be16_to_cpu(sin->sin_port);
1608 spin_unlock_bh(&conn->session->lock);
1609 break;
1610 case AF_INET6:
1611 sin6 = (struct sockaddr_in6 *)addr;
1612 spin_lock_bh(&conn->session->lock);
1613 sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
1614 *port = be16_to_cpu(sin6->sin6_port);
1615 spin_unlock_bh(&conn->session->lock);
1616 break;
1617 }
1618free_addr:
1619 kfree(addr);
1620 return rc;
1621}
1622
1623static int
1624iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1625 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
1626 int is_leading)
1627{
1628 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1629 struct iscsi_host *ihost = shost_priv(shost);
1630 struct iscsi_conn *conn = cls_conn->dd_data;
1631 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1632 struct sock *sk;
1633 struct socket *sock;
1634 int err;
1635
1636 /* look up the existing socket */
1637 sock = sockfd_lookup((int)transport_eph, &err);
1638 if (!sock) {
1639 iscsi_conn_printk(KERN_ERR, conn,
1640 "sockfd_lookup failed %d\n", err);
1641 return -EEXIST;
1642 }
1643 /*
1644 * copy these values now because, if we drop the session,
1645 * userspace may still want to query them since we will
1646 * be using them for the reconnect
1647 */
1648 err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
1649 &conn->portal_port, kernel_getpeername);
1650 if (err)
1651 goto free_socket;
1652
1653 err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
1654 &ihost->local_port, kernel_getsockname);
1655 if (err)
1656 goto free_socket;
1657
1658 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
1659 if (err)
1660 goto free_socket;
1661
1662 /* bind iSCSI connection and socket */
1663 tcp_conn->sock = sock;
1664
1665 /* setup Socket parameters */
1666 sk = sock->sk;
1667 sk->sk_reuse = 1;
1668 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
1669 sk->sk_allocation = GFP_ATOMIC;
1670
1671 /* FIXME: disable Nagle's algorithm */
1672
1673 /*
1674 * Intercept TCP callbacks for sendfile-like receive
1675 * processing.
1676 */
1677 conn->recv_lock = &sk->sk_callback_lock;
1678 iscsi_conn_set_callbacks(conn);
1679 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
1680 /*
1681 * set receive state machine into initial state
1682 */
1683 iscsi_tcp_hdr_recv_prep(tcp_conn);
1684 return 0;
1685
1686free_socket:
1687 sockfd_put(sock);
1688 return err;
1689}
1690
1691/* called with host lock */
1692static void
1693iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1694{
1695 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
1696
1697 /* Prepare PDU, optionally w/ immediate data */
1698 iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
1699
1700 /* If we have immediate data, attach a payload */
1701 if (mtask->data_count)
1702 iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
1703 mtask->data_count);
1704}
1705
1706static int
1707iscsi_r2tpool_alloc(struct iscsi_session *session)
1708{
1709 int i;
1710 int cmd_i;
1711
1712 /*
1713 * initialize per-task: R2T pool and xmit queue
1714 */
1715 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1716 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1717 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1718
1719 /*
1720 * pre-allocate 4x as many r2ts to handle the race where the
1721 * target acks DataOut faster than our data_xmit() could
1722 * replenish the r2tqueue.
1723 */
1724
1725 /* R2T pool */
1726 if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
1727 sizeof(struct iscsi_r2t_info))) {
1728 goto r2t_alloc_fail;
1729 }
1730
1731 /* R2T xmit queue */
1732 tcp_ctask->r2tqueue = kfifo_alloc(
1733 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1734 if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
1735 iscsi_pool_free(&tcp_ctask->r2tpool);
1736 goto r2t_alloc_fail;
1737 }
1738 }
1739
1740 return 0;
1741
1742r2t_alloc_fail:
1743 for (i = 0; i < cmd_i; i++) {
1744 struct iscsi_cmd_task *ctask = session->cmds[i];
1745 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1746
1747 kfifo_free(tcp_ctask->r2tqueue);
1748 iscsi_pool_free(&tcp_ctask->r2tpool);
1749 }
1750 return -ENOMEM;
1751}
1752
1753static void
1754iscsi_r2tpool_free(struct iscsi_session *session)
1755{
1756 int i;
1757
1758 for (i = 0; i < session->cmds_max; i++) {
1759 struct iscsi_cmd_task *ctask = session->cmds[i];
1760 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1761
1762 kfifo_free(tcp_ctask->r2tqueue);
1763 iscsi_pool_free(&tcp_ctask->r2tpool);
1764 }
1765}
1766
1767static int
1768iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1769 char *buf, int buflen)
1770{
1771 struct iscsi_conn *conn = cls_conn->dd_data;
1772 struct iscsi_session *session = conn->session;
1773 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1774 int value;
1775
1776 switch(param) {
1777 case ISCSI_PARAM_HDRDGST_EN:
1778 iscsi_set_param(cls_conn, param, buf, buflen);
1779 break;
1780 case ISCSI_PARAM_DATADGST_EN:
1781 iscsi_set_param(cls_conn, param, buf, buflen);
1782 tcp_conn->sendpage = conn->datadgst_en ?
1783 sock_no_sendpage : tcp_conn->sock->ops->sendpage;
1784 break;
1785 case ISCSI_PARAM_MAX_R2T:
1786 sscanf(buf, "%d", &value);
1787 if (value <= 0 || !is_power_of_2(value))
1788 return -EINVAL;
1789 if (session->max_r2t == value)
1790 break;
1791 iscsi_r2tpool_free(session);
1792 iscsi_set_param(cls_conn, param, buf, buflen);
1793 if (iscsi_r2tpool_alloc(session))
1794 return -ENOMEM;
1795 break;
1796 default:
1797 return iscsi_set_param(cls_conn, param, buf, buflen);
1798 }
1799
1800 return 0;
1801}
1802
1803static int
1804iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1805 enum iscsi_param param, char *buf)
1806{
1807 struct iscsi_conn *conn = cls_conn->dd_data;
1808 int len;
1809
1810 switch(param) {
1811 case ISCSI_PARAM_CONN_PORT:
1812 spin_lock_bh(&conn->session->lock);
1813 len = sprintf(buf, "%hu\n", conn->portal_port);
1814 spin_unlock_bh(&conn->session->lock);
1815 break;
1816 case ISCSI_PARAM_CONN_ADDRESS:
1817 spin_lock_bh(&conn->session->lock);
1818 len = sprintf(buf, "%s\n", conn->portal_address);
1819 spin_unlock_bh(&conn->session->lock);
1820 break;
1821 default:
1822 return iscsi_conn_get_param(cls_conn, param, buf);
1823 }
1824
1825 return len;
1826}
1827
1828static void
1829iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
1830{
1831 struct iscsi_conn *conn = cls_conn->dd_data;
1832 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1833
1834 stats->txdata_octets = conn->txdata_octets;
1835 stats->rxdata_octets = conn->rxdata_octets;
1836 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1837 stats->dataout_pdus = conn->dataout_pdus_cnt;
1838 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1839 stats->datain_pdus = conn->datain_pdus_cnt;
1840 stats->r2t_pdus = conn->r2t_pdus_cnt;
1841 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1842 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1843 stats->custom_length = 3;
1844 strcpy(stats->custom[0].desc, "tx_sendpage_failures");
1845 stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
1846 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
1847 stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
1848 strcpy(stats->custom[2].desc, "eh_abort_cnt");
1849 stats->custom[2].value = conn->eh_abort_cnt;
1850}
1851
1852static struct iscsi_cls_session *
1853iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
1854 uint16_t qdepth, uint32_t initial_cmdsn,
1855 uint32_t *hostno)
1856{
1857 struct iscsi_cls_session *cls_session;
1858 struct iscsi_session *session;
1859 int cmd_i;
1860
1861 if (shost) {
1862 printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
1863 shost->host_no);
1864 return NULL;
1865 }
1866
1867 shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
1868 if (!shost)
1869 return NULL;
1870 shost->transportt = iscsi_tcp_scsi_transport;
1871 shost->max_lun = iscsi_max_lun;
1872 shost->max_id = 0;
1873 shost->max_channel = 0;
1874 shost->max_cmd_len = 16;
1875 shost->can_queue = cmds_max;
1876
1877 if (iscsi_host_add(shost, NULL))
1878 goto free_host;
1879 *hostno = shost->host_no;
1880
1881 cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
1882 sizeof(struct iscsi_tcp_cmd_task),
1883 sizeof(struct iscsi_tcp_mgmt_task),
1884 initial_cmdsn);
1885 if (!cls_session)
1886 goto remove_host;
1887 session = cls_session->dd_data;
1888
1889 shost->can_queue = session->cmds_max;
1890 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1891 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1892 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1893
1894 ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
1895 ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
1896 }
1897
1898 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
1899 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
1900 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
1901
1902 mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
1903 }
1904
1905 if (iscsi_r2tpool_alloc(session))
1906 goto remove_session;
1907 return cls_session;
1908
1909remove_session:
1910 iscsi_session_teardown(cls_session);
1911remove_host:
1912 iscsi_host_remove(shost);
1913free_host:
1914 iscsi_host_free(shost);
1915 return NULL;
1916}
1917
1918static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1919{
1920 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1921
1922 iscsi_r2tpool_free(cls_session->dd_data);
1923
1924 iscsi_host_remove(shost);
1925 iscsi_host_free(shost);
1926}
1927
1928static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
1929{
1930 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
1931 blk_queue_dma_alignment(sdev->request_queue, 0);
1932 return 0;
1933}
1934
1935static struct scsi_host_template iscsi_sht = {
1936 .module = THIS_MODULE,
1937 .name = "iSCSI Initiator over TCP/IP",
1938 .queuecommand = iscsi_queuecommand,
1939 .change_queue_depth = iscsi_change_queue_depth,
1940 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
1941 .sg_tablesize = 4096,
1942 .max_sectors = 0xFFFF,
1943 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
1944 .eh_abort_handler = iscsi_eh_abort,
1945 .eh_device_reset_handler= iscsi_eh_device_reset,
1946 .eh_host_reset_handler = iscsi_eh_host_reset,
1947 .use_clustering = DISABLE_CLUSTERING,
1948 .slave_configure = iscsi_tcp_slave_configure,
1949 .proc_name = "iscsi_tcp",
1950 .this_id = -1,
1951};
1952
1953static struct iscsi_transport iscsi_tcp_transport = {
1954 .owner = THIS_MODULE,
1955 .name = "tcp",
1956 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
1957 | CAP_DATADGST,
1958 .param_mask = ISCSI_MAX_RECV_DLENGTH |
1959 ISCSI_MAX_XMIT_DLENGTH |
1960 ISCSI_HDRDGST_EN |
1961 ISCSI_DATADGST_EN |
1962 ISCSI_INITIAL_R2T_EN |
1963 ISCSI_MAX_R2T |
1964 ISCSI_IMM_DATA_EN |
1965 ISCSI_FIRST_BURST |
1966 ISCSI_MAX_BURST |
1967 ISCSI_PDU_INORDER_EN |
1968 ISCSI_DATASEQ_INORDER_EN |
1969 ISCSI_ERL |
1970 ISCSI_CONN_PORT |
1971 ISCSI_CONN_ADDRESS |
1972 ISCSI_EXP_STATSN |
1973 ISCSI_PERSISTENT_PORT |
1974 ISCSI_PERSISTENT_ADDRESS |
1975 ISCSI_TARGET_NAME | ISCSI_TPGT |
1976 ISCSI_USERNAME | ISCSI_PASSWORD |
1977 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
1978 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
1979 ISCSI_LU_RESET_TMO |
1980 ISCSI_PING_TMO | ISCSI_RECV_TMO,
1981 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
1982 ISCSI_HOST_INITIATOR_NAME |
1983 ISCSI_HOST_NETDEV_NAME,
1984 /* session management */
1985 .create_session = iscsi_tcp_session_create,
1986 .destroy_session = iscsi_tcp_session_destroy,
1987 /* connection management */
1988 .create_conn = iscsi_tcp_conn_create,
1989 .bind_conn = iscsi_tcp_conn_bind,
1990 .destroy_conn = iscsi_tcp_conn_destroy,
1991 .set_param = iscsi_conn_set_param,
1992 .get_conn_param = iscsi_tcp_conn_get_param,
1993 .get_session_param = iscsi_session_get_param,
1994 .start_conn = iscsi_conn_start,
1995 .stop_conn = iscsi_tcp_conn_stop,
1996 /* iscsi host params */
1997 .get_host_param = iscsi_host_get_param,
1998 .set_host_param = iscsi_host_set_param,
1999 /* IO */
2000 .send_pdu = iscsi_conn_send_pdu,
2001 .get_stats = iscsi_conn_get_stats,
2002 .init_cmd_task = iscsi_tcp_ctask_init,
2003 .init_mgmt_task = iscsi_tcp_mtask_init,
2004 .xmit_cmd_task = iscsi_tcp_ctask_xmit,
2005 .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
2006 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
2007 /* recovery */
2008 .session_recovery_timedout = iscsi_session_recovery_timedout,
2009};
2010
2011static int __init
2012iscsi_tcp_init(void)
2013{
2014 if (iscsi_max_lun < 1) {
2015 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
2016 iscsi_max_lun);
2017 return -EINVAL;
2018 }
2019
2020 iscsi_tcp_scsi_transport = iscsi_register_transport(
2021 &iscsi_tcp_transport);
2022 if (!iscsi_tcp_scsi_transport)
2023 return -ENODEV;
2024
2025 return 0;
2026}
2027
2028static void __exit
2029iscsi_tcp_exit(void)
2030{
2031 iscsi_unregister_transport(&iscsi_tcp_transport);
2032}
2033
2034module_init(iscsi_tcp_init);
2035module_exit(iscsi_tcp_exit);