drivers/scsi/libiscsi.c
1 /*
2 * iSCSI lib functions
3 *
4 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2004 - 2006 Mike Christie
6 * Copyright (C) 2004 - 2005 Dmitry Yusupov
7 * Copyright (C) 2004 - 2005 Alex Aizman
8 * maintained by open-iscsi@googlegroups.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24 #include <linux/types.h>
25 #include <linux/kfifo.h>
26 #include <linux/delay.h>
27 #include <linux/log2.h>
28 #include <asm/unaligned.h>
29 #include <net/tcp.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include <scsi/iscsi_proto.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_transport_iscsi.h>
39 #include <scsi/libiscsi.h>
40
41 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
42 #define SNA32_CHECK 2147483648UL
43
44 static int iscsi_sna_lt(u32 n1, u32 n2)
45 {
46 return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
47 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
48 }
49
50 /* Serial Number Arithmetic, 32 bits, less than or equal, RFC1982 */
51 static int iscsi_sna_lte(u32 n1, u32 n2)
52 {
53 return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
54 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
55 }
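/*
 * For illustration, a few RFC 1982 comparisons with 32-bit wraparound
 * (values chosen only as examples):
 *
 *	iscsi_sna_lt(5, 10)         -> 1  (10 - 5 is below 2^31)
 *	iscsi_sna_lt(0xfffffffe, 3) -> 1  (3 - 0xfffffffe wraps to 5)
 *	iscsi_sna_lt(3, 0xfffffffe) -> 0  (0xfffffffe - 3 is above 2^31)
 *
 * This is what lets CmdSN/StatSN windows keep advancing across the
 * 32-bit wrap.
 */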
56
57 void
58 iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
59 {
60 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
61 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
62
63 /*
64 * standard specifies this check for when to update expected and
65 * max sequence numbers
66 */
67 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
68 return;
69
70 if (exp_cmdsn != session->exp_cmdsn &&
71 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
72 session->exp_cmdsn = exp_cmdsn;
73
74 if (max_cmdsn != session->max_cmdsn &&
75 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
76 session->max_cmdsn = max_cmdsn;
77 /*
78 * if the window closed with IO queued, then kick the
79 * xmit thread
80 */
81 if (!list_empty(&session->leadconn->xmitqueue) ||
82 !list_empty(&session->leadconn->mgmtqueue)) {
83 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
84 scsi_queue_work(session->host,
85 &session->leadconn->xmitwork);
86 }
87 }
88 }
89 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
90
91 void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
92 struct iscsi_data *hdr)
93 {
94 struct iscsi_conn *conn = task->conn;
95
96 memset(hdr, 0, sizeof(struct iscsi_data));
97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
98 hdr->datasn = cpu_to_be32(task->unsol_datasn);
99 task->unsol_datasn++;
100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
102
103 hdr->itt = task->hdr->itt;
104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
105 hdr->offset = cpu_to_be32(task->unsol_offset);
106
107 if (task->unsol_count > conn->max_xmit_dlength) {
108 hton24(hdr->dlength, conn->max_xmit_dlength);
109 task->data_count = conn->max_xmit_dlength;
110 task->unsol_offset += task->data_count;
111 hdr->flags = 0;
112 } else {
113 hton24(hdr->dlength, task->unsol_count);
114 task->data_count = task->unsol_count;
115 hdr->flags = ISCSI_FLAG_CMD_FINAL;
116 }
117 }
118 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
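/*
 * Rough illustration of how the helper above chunks unsolicited data
 * (numbers are only an example): with 56k of unsolicited data left and
 * max_xmit_dlength = 8k, each call builds an 8k Data-Out header with
 * flags = 0; the call that covers the last chunk (unsol_count no longer
 * above max_xmit_dlength) sets ISCSI_FLAG_CMD_FINAL instead. The LLD is
 * expected to consume unsol_count between calls.
 */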
119
120 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
121 {
122 unsigned exp_len = task->hdr_len + len;
123
124 if (exp_len > task->hdr_max) {
125 WARN_ON(1);
126 return -EINVAL;
127 }
128
129 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
130 task->hdr_len = exp_len;
131 return 0;
132 }
133
134 /*
135 * make an extended cdb AHS
136 */
137 static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
138 {
139 struct scsi_cmnd *cmd = task->sc;
140 unsigned rlen, pad_len;
141 unsigned short ahslength;
142 struct iscsi_ecdb_ahdr *ecdb_ahdr;
143 int rc;
144
145 ecdb_ahdr = iscsi_next_hdr(task);
146 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
147
148 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
149 ahslength = rlen + sizeof(ecdb_ahdr->reserved);
150
151 pad_len = iscsi_padding(rlen);
152
153 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
154 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
155 if (rc)
156 return rc;
157
158 if (pad_len)
159 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
160
161 ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
162 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
163 ecdb_ahdr->reserved = 0;
164 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
165
166 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
167 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
168 cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
169
170 return 0;
171 }
172
173 static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
174 {
175 struct scsi_cmnd *sc = task->sc;
176 struct iscsi_rlength_ahdr *rlen_ahdr;
177 int rc;
178
179 rlen_ahdr = iscsi_next_hdr(task);
180 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
181 if (rc)
182 return rc;
183
184 rlen_ahdr->ahslength =
185 cpu_to_be16(sizeof(rlen_ahdr->read_length) +
186 sizeof(rlen_ahdr->reserved));
187 rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
188 rlen_ahdr->reserved = 0;
189 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
190
191 debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
192 "rlen_ahdr->ahslength(%d)\n",
193 be32_to_cpu(rlen_ahdr->read_length),
194 be16_to_cpu(rlen_ahdr->ahslength));
195 return 0;
196 }
197
198 /**
199 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
200 * @task: iscsi task
201 *
202 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
203 * fields like dlength or final based on how much data it sends
204 */
205 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
206 {
207 struct iscsi_conn *conn = task->conn;
208 struct iscsi_session *session = conn->session;
209 struct iscsi_cmd *hdr = task->hdr;
210 struct scsi_cmnd *sc = task->sc;
211 unsigned hdrlength, cmd_len;
212 int rc;
213
214 task->hdr_len = 0;
215 rc = iscsi_add_hdr(task, sizeof(*hdr));
216 if (rc)
217 return rc;
218 hdr->opcode = ISCSI_OP_SCSI_CMD;
219 hdr->flags = ISCSI_ATTR_SIMPLE;
220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
221 hdr->itt = build_itt(task->itt, session->age);
222 hdr->cmdsn = cpu_to_be32(session->cmdsn);
223 session->cmdsn++;
224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
225 cmd_len = sc->cmd_len;
226 if (cmd_len < ISCSI_CDB_SIZE)
227 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
228 else if (cmd_len > ISCSI_CDB_SIZE) {
229 rc = iscsi_prep_ecdb_ahs(task);
230 if (rc)
231 return rc;
232 cmd_len = ISCSI_CDB_SIZE;
233 }
234 memcpy(hdr->cdb, sc->cmnd, cmd_len);
235
236 task->imm_count = 0;
237 if (scsi_bidi_cmnd(sc)) {
238 hdr->flags |= ISCSI_FLAG_CMD_READ;
239 rc = iscsi_prep_bidi_ahs(task);
240 if (rc)
241 return rc;
242 }
243 if (sc->sc_data_direction == DMA_TO_DEVICE) {
244 unsigned out_len = scsi_out(sc)->length;
245 hdr->data_length = cpu_to_be32(out_len);
246 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
247 /*
248 * Write counters:
249 *
250 * imm_count bytes to be sent right after
251 * SCSI PDU Header
252 *
253 * unsol_count bytes(as Data-Out) to be sent
254 * without R2T ack right after
255 * immediate data
256 *
257 * r2t_data_count bytes to be sent via R2T ack's
258 *
259 * pad_count bytes to be sent as zero-padding
260 */
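/*
 * Worked example with hypothetical negotiated values: imm_data_en set,
 * initial_r2t_en clear, first_burst = 64k, max_xmit_dlength = 8k, and a
 * 256k write:
 *
 *	imm_count   = min(64k, 8k)        =  8k  (sent with this PDU)
 *	unsol_count = min(64k, 256k) - 8k = 56k  (unsolicited Data-Outs)
 *	remaining 192k is solicited via R2Ts
 */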
261 task->unsol_count = 0;
262 task->unsol_offset = 0;
263 task->unsol_datasn = 0;
264
265 if (session->imm_data_en) {
266 if (out_len >= session->first_burst)
267 task->imm_count = min(session->first_burst,
268 conn->max_xmit_dlength);
269 else
270 task->imm_count = min(out_len,
271 conn->max_xmit_dlength);
272 hton24(hdr->dlength, task->imm_count);
273 } else
274 zero_data(hdr->dlength);
275
276 if (!session->initial_r2t_en) {
277 task->unsol_count = min(session->first_burst, out_len)
278 - task->imm_count;
279 task->unsol_offset = task->imm_count;
280 }
281
282 if (!task->unsol_count)
283 /* No unsolicited Data-Outs */
284 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
285 } else {
286 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
287 zero_data(hdr->dlength);
288 hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
289
290 if (sc->sc_data_direction == DMA_FROM_DEVICE)
291 hdr->flags |= ISCSI_FLAG_CMD_READ;
292 }
293
294 /* calculate size of additional header segments (AHSs) */
295 hdrlength = task->hdr_len - sizeof(*hdr);
296
297 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
298 hdrlength /= ISCSI_PAD_LEN;
299
300 WARN_ON(hdrlength >= 256);
301 hdr->hlength = hdrlength & 0xFF;
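/*
 * hlength is in 4-byte words: e.g. carrying only the 8-byte bidi
 * read-length AHS built above would give hlength = 2.
 */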
302
303 if (conn->session->tt->init_task &&
304 conn->session->tt->init_task(task))
305 return -EIO;
306
307 task->state = ISCSI_TASK_RUNNING;
308 list_move_tail(&task->running, &conn->run_list);
309
310 conn->scsicmd_pdus_cnt++;
311 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
312 "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
313 "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
314 "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
315 scsi_bufflen(sc),
316 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
317 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
318 return 0;
319 }
320
321 /**
322 * iscsi_complete_command - finish a task
323 * @task: iscsi cmd task
324 *
325 * Must be called with session lock.
326 * This function returns the scsi command to scsi-ml or cleans
327 * up mgmt tasks then returns the task to the pool.
328 */
329 static void iscsi_complete_command(struct iscsi_task *task)
330 {
331 struct iscsi_conn *conn = task->conn;
332 struct iscsi_session *session = conn->session;
333 struct scsi_cmnd *sc = task->sc;
334
335 list_del_init(&task->running);
336 task->state = ISCSI_TASK_COMPLETED;
337 task->sc = NULL;
338
339 if (conn->task == task)
340 conn->task = NULL;
341 /*
342 * login task is preallocated so do not free
343 */
344 if (conn->login_task == task)
345 return;
346
347 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
348
349 if (conn->ping_task == task)
350 conn->ping_task = NULL;
351
352 if (sc) {
353 task->sc = NULL;
354 /* SCSI eh reuses commands to verify us */
355 sc->SCp.ptr = NULL;
356 /*
357 * queue command may call this to free the task, but
358 * not have setup the sc callback
359 */
360 if (sc->scsi_done)
361 sc->scsi_done(sc);
362 }
363 }
364
365 void __iscsi_get_task(struct iscsi_task *task)
366 {
367 atomic_inc(&task->refcount);
368 }
369 EXPORT_SYMBOL_GPL(__iscsi_get_task);
370
371 static void __iscsi_put_task(struct iscsi_task *task)
372 {
373 if (atomic_dec_and_test(&task->refcount))
374 iscsi_complete_command(task);
375 }
376
377 void iscsi_put_task(struct iscsi_task *task)
378 {
379 struct iscsi_session *session = task->conn->session;
380
381 spin_lock_bh(&session->lock);
382 __iscsi_put_task(task);
383 spin_unlock_bh(&session->lock);
384 }
385 EXPORT_SYMBOL_GPL(iscsi_put_task);
386
387 /*
388 * session lock must be held
389 */
390 static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
391 int err)
392 {
393 struct scsi_cmnd *sc;
394
395 sc = task->sc;
396 if (!sc)
397 return;
398
399 if (task->state == ISCSI_TASK_PENDING)
400 /*
401 * cmd never made it to the xmit thread, so we should not count
402 * the cmd in the sequencing
403 */
404 conn->session->queued_cmdsn--;
405 else
406 conn->session->tt->cleanup_task(conn, task);
407 /*
408 * Check if cleanup_task dropped the lock and the command completed.
409 */
410 if (!task->sc)
411 return;
412
413 sc->result = err;
414 if (!scsi_bidi_cmnd(sc))
415 scsi_set_resid(sc, scsi_bufflen(sc));
416 else {
417 scsi_out(sc)->resid = scsi_out(sc)->length;
418 scsi_in(sc)->resid = scsi_in(sc)->length;
419 }
420
421 if (conn->task == task)
422 conn->task = NULL;
423 /* release ref from queuecommand */
424 __iscsi_put_task(task);
425 }
426
427 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
428 struct iscsi_task *task)
429 {
430 struct iscsi_session *session = conn->session;
431 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
432 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
433
434 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
435 return -ENOTCONN;
436
437 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
438 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
439 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
440 /*
441 * pre-format CmdSN for outgoing PDU.
442 */
443 nop->cmdsn = cpu_to_be32(session->cmdsn);
444 if (hdr->itt != RESERVED_ITT) {
445 hdr->itt = build_itt(task->itt, session->age);
446 /*
447 * TODO: We always use immediate, so we never hit this.
448 * If we start to send tmfs or nops as non-immediate then
449 * we should start checking the cmdsn numbers for mgmt tasks.
450 */
451 if (conn->c_stage == ISCSI_CONN_STARTED &&
452 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
453 session->queued_cmdsn++;
454 session->cmdsn++;
455 }
456 }
457
458 if (session->tt->init_task)
459 session->tt->init_task(task);
460
461 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
462 session->state = ISCSI_STATE_LOGGING_OUT;
463
464 list_move_tail(&task->running, &conn->mgmt_run_list);
465 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
466 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
467 task->data_count);
468 return 0;
469 }
470
471 static struct iscsi_task *
472 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
473 char *data, uint32_t data_size)
474 {
475 struct iscsi_session *session = conn->session;
476 struct iscsi_task *task;
477
478 if (session->state == ISCSI_STATE_TERMINATE)
479 return NULL;
480
481 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
482 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
483 /*
484 * Login and Text are sent serially, in
485 * request-followed-by-response sequence.
486 * Same task can be used. Same ITT must be used.
487 * Note that login_task is preallocated at conn_create().
488 */
489 task = conn->login_task;
490 else {
491 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
492 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
493
494 if (!__kfifo_get(session->cmdpool.queue,
495 (void*)&task, sizeof(void*)))
496 return NULL;
497
498 if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
499 hdr->ttt == RESERVED_ITT) {
500 conn->ping_task = task;
501 conn->last_ping = jiffies;
502 }
503 }
504 /*
505 * The reference is released in the pdu completion path for tasks we
506 * expect a response for, and by the LLD once it has transmitted the
507 * task for pdus we do not expect a response for.
508 */
509 atomic_set(&task->refcount, 1);
510 task->conn = conn;
511 task->sc = NULL;
512
513 if (data_size) {
514 memcpy(task->data, data, data_size);
515 task->data_count = data_size;
516 } else
517 task->data_count = 0;
518
519 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
520 INIT_LIST_HEAD(&task->running);
521 list_add_tail(&task->running, &conn->mgmtqueue);
522
523 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
524 if (iscsi_prep_mgmt_task(conn, task)) {
525 __iscsi_put_task(task);
526 return NULL;
527 }
528
529 if (session->tt->xmit_task(task))
530 task = NULL;
531
532 } else
533 scsi_queue_work(conn->session->host, &conn->xmitwork);
534
535 return task;
536 }
537
538 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
539 char *data, uint32_t data_size)
540 {
541 struct iscsi_conn *conn = cls_conn->dd_data;
542 struct iscsi_session *session = conn->session;
543 int err = 0;
544
545 spin_lock_bh(&session->lock);
546 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
547 err = -EPERM;
548 spin_unlock_bh(&session->lock);
549 return err;
550 }
551 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
552
553 /**
554 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
555 * @conn: iscsi connection
556 * @hdr: iscsi header
557 * @task: scsi command task
558 * @data: cmd data buffer
559 * @datalen: len of buffer
560 *
561 * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
562 * then completes the command and task.
563 **/
564 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
565 struct iscsi_task *task, char *data,
566 int datalen)
567 {
568 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
569 struct iscsi_session *session = conn->session;
570 struct scsi_cmnd *sc = task->sc;
571
572 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
573 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
574
575 sc->result = (DID_OK << 16) | rhdr->cmd_status;
576
577 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
578 sc->result = DID_ERROR << 16;
579 goto out;
580 }
581
582 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
583 uint16_t senselen;
584
585 if (datalen < 2) {
586 invalid_datalen:
587 iscsi_conn_printk(KERN_ERR, conn,
588 "Got CHECK_CONDITION but invalid data "
589 "buffer size of %d\n", datalen);
590 sc->result = DID_BAD_TARGET << 16;
591 goto out;
592 }
593
594 senselen = get_unaligned_be16(data);
595 if (datalen < senselen)
596 goto invalid_datalen;
597
598 memcpy(sc->sense_buffer, data + 2,
599 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
600 debug_scsi("copied %d bytes of sense\n",
601 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
602 }
603
604 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
605 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
606 int res_count = be32_to_cpu(rhdr->bi_residual_count);
607
608 if (scsi_bidi_cmnd(sc) && res_count > 0 &&
609 (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
610 res_count <= scsi_in(sc)->length))
611 scsi_in(sc)->resid = res_count;
612 else
613 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
614 }
615
616 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
617 ISCSI_FLAG_CMD_OVERFLOW)) {
618 int res_count = be32_to_cpu(rhdr->residual_count);
619
620 if (res_count > 0 &&
621 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
622 res_count <= scsi_bufflen(sc)))
623 /* write side for bidi or uni-io set_resid */
624 scsi_set_resid(sc, res_count);
625 else
626 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
627 }
628 out:
629 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
630 (long)sc, sc->result, task->itt);
631 conn->scsirsp_pdus_cnt++;
632
633 __iscsi_put_task(task);
634 }
635
636 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
637 {
638 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
639
640 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
641 conn->tmfrsp_pdus_cnt++;
642
643 if (conn->tmf_state != TMF_QUEUED)
644 return;
645
646 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
647 conn->tmf_state = TMF_SUCCESS;
648 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
649 conn->tmf_state = TMF_NOT_FOUND;
650 else
651 conn->tmf_state = TMF_FAILED;
652 wake_up(&conn->ehwait);
653 }
654
655 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
656 {
657 struct iscsi_nopout hdr;
658 struct iscsi_task *task;
659
660 if (!rhdr && conn->ping_task)
661 return;
662
663 memset(&hdr, 0, sizeof(struct iscsi_nopout));
664 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
665 hdr.flags = ISCSI_FLAG_CMD_FINAL;
666
667 if (rhdr) {
668 memcpy(hdr.lun, rhdr->lun, 8);
669 hdr.ttt = rhdr->ttt;
670 hdr.itt = RESERVED_ITT;
671 } else
672 hdr.ttt = RESERVED_ITT;
673
674 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
675 if (!task)
676 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
677 }
678
679 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
680 char *data, int datalen)
681 {
682 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
683 struct iscsi_hdr rejected_pdu;
684 uint32_t itt;
685
686 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
687
688 if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
689 if (ntoh24(reject->dlength) > datalen)
690 return ISCSI_ERR_PROTO;
691
692 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
693 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
694 itt = get_itt(rejected_pdu.itt);
695 iscsi_conn_printk(KERN_ERR, conn,
696 "itt 0x%x had pdu (op 0x%x) rejected "
697 "due to DataDigest error.\n", itt,
698 rejected_pdu.opcode);
699 }
700 }
701 return 0;
702 }
703
704 /**
705 * iscsi_itt_to_task - look up task by itt
706 * @conn: iscsi connection
707 * @itt: itt
708 *
709 * This should be used for mgmt tasks like login and nops, or if
710 * the LLD's itt space does not include the session age.
711 *
712 * The session lock must be held.
713 */
714 static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
715 {
716 struct iscsi_session *session = conn->session;
717 uint32_t i;
718
719 if (itt == RESERVED_ITT)
720 return NULL;
721
722 i = get_itt(itt);
723 if (i >= session->cmds_max)
724 return NULL;
725
726 return session->cmds[i];
727 }
728
729 /**
730 * __iscsi_complete_pdu - complete pdu
731 * @conn: iscsi conn
732 * @hdr: iscsi header
733 * @data: data buffer
734 * @datalen: len of data buffer
735 *
736 * Completes pdu processing by freeing any resources allocated at
737 * queuecommand or send generic. The session lock must be held and
738 * iscsi_verify_itt() must have been called.
739 */
740 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
741 char *data, int datalen)
742 {
743 struct iscsi_session *session = conn->session;
744 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
745 struct iscsi_task *task;
746 uint32_t itt;
747
748 conn->last_recv = jiffies;
749 rc = iscsi_verify_itt(conn, hdr->itt);
750 if (rc)
751 return rc;
752
753 if (hdr->itt != RESERVED_ITT)
754 itt = get_itt(hdr->itt);
755 else
756 itt = ~0U;
757
758 debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
759 opcode, conn->id, itt, datalen);
760
761 if (itt == ~0U) {
762 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
763
764 switch(opcode) {
765 case ISCSI_OP_NOOP_IN:
766 if (datalen) {
767 rc = ISCSI_ERR_PROTO;
768 break;
769 }
770
771 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
772 break;
773
774 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
775 break;
776 case ISCSI_OP_REJECT:
777 rc = iscsi_handle_reject(conn, hdr, data, datalen);
778 break;
779 case ISCSI_OP_ASYNC_EVENT:
780 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
781 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
782 rc = ISCSI_ERR_CONN_FAILED;
783 break;
784 default:
785 rc = ISCSI_ERR_BAD_OPCODE;
786 break;
787 }
788 goto out;
789 }
790
791 switch(opcode) {
792 case ISCSI_OP_SCSI_CMD_RSP:
793 case ISCSI_OP_SCSI_DATA_IN:
794 task = iscsi_itt_to_ctask(conn, hdr->itt);
795 if (!task)
796 return ISCSI_ERR_BAD_ITT;
797 break;
798 case ISCSI_OP_R2T:
799 /*
800 * LLD handles R2Ts if they need to.
801 */
802 return 0;
803 case ISCSI_OP_LOGOUT_RSP:
804 case ISCSI_OP_LOGIN_RSP:
805 case ISCSI_OP_TEXT_RSP:
806 case ISCSI_OP_SCSI_TMFUNC_RSP:
807 case ISCSI_OP_NOOP_IN:
808 task = iscsi_itt_to_task(conn, hdr->itt);
809 if (!task)
810 return ISCSI_ERR_BAD_ITT;
811 break;
812 default:
813 return ISCSI_ERR_BAD_OPCODE;
814 }
815
816 switch(opcode) {
817 case ISCSI_OP_SCSI_CMD_RSP:
818 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
819 break;
820 case ISCSI_OP_SCSI_DATA_IN:
821 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
822 conn->scsirsp_pdus_cnt++;
823 iscsi_update_cmdsn(session,
824 (struct iscsi_nopin*) hdr);
825 __iscsi_put_task(task);
826 }
827 break;
828 case ISCSI_OP_LOGOUT_RSP:
829 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
830 if (datalen) {
831 rc = ISCSI_ERR_PROTO;
832 break;
833 }
834 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
835 goto recv_pdu;
836 case ISCSI_OP_LOGIN_RSP:
837 case ISCSI_OP_TEXT_RSP:
838 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
839 /*
840 * login related PDU's exp_statsn is handled in
841 * userspace
842 */
843 goto recv_pdu;
844 case ISCSI_OP_SCSI_TMFUNC_RSP:
845 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
846 if (datalen) {
847 rc = ISCSI_ERR_PROTO;
848 break;
849 }
850
851 iscsi_tmf_rsp(conn, hdr);
852 __iscsi_put_task(task);
853 break;
854 case ISCSI_OP_NOOP_IN:
855 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
856 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
857 rc = ISCSI_ERR_PROTO;
858 break;
859 }
860 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
861
862 if (conn->ping_task != task)
863 /*
864 * If this is not in response to one of our
865 * nops then it must be from userspace.
866 */
867 goto recv_pdu;
868
869 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
870 __iscsi_put_task(task);
871 break;
872 default:
873 rc = ISCSI_ERR_BAD_OPCODE;
874 break;
875 }
876
877 out:
878 return rc;
879 recv_pdu:
880 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
881 rc = ISCSI_ERR_CONN_FAILED;
882 __iscsi_put_task(task);
883 return rc;
884 }
885 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
886
887 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
888 char *data, int datalen)
889 {
890 int rc;
891
892 spin_lock(&conn->session->lock);
893 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
894 spin_unlock(&conn->session->lock);
895 return rc;
896 }
897 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
898
899 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
900 {
901 struct iscsi_session *session = conn->session;
902 uint32_t i;
903
904 if (itt == RESERVED_ITT)
905 return 0;
906
907 if (((__force u32)itt & ISCSI_AGE_MASK) !=
908 (session->age << ISCSI_AGE_SHIFT)) {
909 iscsi_conn_printk(KERN_ERR, conn,
910 "received itt %x expected session age (%x)\n",
911 (__force u32)itt, session->age);
912 return ISCSI_ERR_BAD_ITT;
913 }
914
915 i = get_itt(itt);
916 if (i >= session->cmds_max) {
917 iscsi_conn_printk(KERN_ERR, conn,
918 "received invalid itt index %u (max cmds "
919 "%u.\n", i, session->cmds_max);
920 return ISCSI_ERR_BAD_ITT;
921 }
922 return 0;
923 }
924 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
925
926 /**
927 * iscsi_itt_to_ctask - look up ctask by itt
928 * @conn: iscsi connection
929 * @itt: itt
930 *
931 * This should be used for cmd tasks.
932 *
933 * The session lock must be held.
934 */
935 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
936 {
937 struct iscsi_task *task;
938
939 if (iscsi_verify_itt(conn, itt))
940 return NULL;
941
942 task = iscsi_itt_to_task(conn, itt);
943 if (!task || !task->sc)
944 return NULL;
945
946 if (task->sc->SCp.phase != conn->session->age) {
947 iscsi_session_printk(KERN_ERR, conn->session,
948 "task's session age %d, expected %d\n",
949 task->sc->SCp.phase, conn->session->age);
950 return NULL;
951 }
952
953 return task;
954 }
955 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
956
957 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
958 {
959 struct iscsi_session *session = conn->session;
960 unsigned long flags;
961
962 spin_lock_irqsave(&session->lock, flags);
963 if (session->state == ISCSI_STATE_FAILED) {
964 spin_unlock_irqrestore(&session->lock, flags);
965 return;
966 }
967
968 if (conn->stop_stage == 0)
969 session->state = ISCSI_STATE_FAILED;
970 spin_unlock_irqrestore(&session->lock, flags);
971 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
972 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
973 iscsi_conn_error(conn->cls_conn, err);
974 }
975 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
976
977 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
978 {
979 struct iscsi_session *session = conn->session;
980
981 /*
982 * Check for iSCSI window and take care of CmdSN wrap-around
983 */
984 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
985 debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
986 "CmdSN %u/%u\n", session->exp_cmdsn,
987 session->max_cmdsn, session->cmdsn,
988 session->queued_cmdsn);
989 return -ENOSPC;
990 }
991 return 0;
992 }
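/*
 * Put differently, the send window is open while queued_cmdsn <=
 * max_cmdsn in RFC 1982 terms. E.g. with queued_cmdsn = 10 and
 * max_cmdsn = 17, eight more commands fit; the next attempt gets
 * -ENOSPC and iscsi_queuecommand() returns SCSI_MLQUEUE_HOST_BUSY
 * until the target advances MaxCmdSN.
 */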
993
994 static int iscsi_xmit_task(struct iscsi_conn *conn)
995 {
996 struct iscsi_task *task = conn->task;
997 int rc;
998
999 __iscsi_get_task(task);
1000 spin_unlock_bh(&conn->session->lock);
1001 rc = conn->session->tt->xmit_task(task);
1002 spin_lock_bh(&conn->session->lock);
1003 __iscsi_put_task(task);
1004 if (!rc)
1005 /* done with this task */
1006 conn->task = NULL;
1007 return rc;
1008 }
1009
1010 /**
1011 * iscsi_requeue_task - requeue task to run from session workqueue
1012 * @task: task to requeue
1013 *
1014 * LLDs that need to run a task from the session workqueue should call
1015 * this. The session lock must be held. This should only be called
1016 * by software drivers.
1017 */
1018 void iscsi_requeue_task(struct iscsi_task *task)
1019 {
1020 struct iscsi_conn *conn = task->conn;
1021
1022 list_move_tail(&task->running, &conn->requeue);
1023 scsi_queue_work(conn->session->host, &conn->xmitwork);
1024 }
1025 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1026
1027 /**
1028 * iscsi_data_xmit - xmit any command into the scheduled connection
1029 * @conn: iscsi connection
1030 *
1031 * Notes:
1032 * The function can return -EAGAIN in which case the caller must
1033 * re-schedule it again later or recover. '0' return code means
1034 * successful xmit.
1035 **/
1036 static int iscsi_data_xmit(struct iscsi_conn *conn)
1037 {
1038 int rc = 0;
1039
1040 spin_lock_bh(&conn->session->lock);
1041 if (unlikely(conn->suspend_tx)) {
1042 debug_scsi("conn %d Tx suspended!\n", conn->id);
1043 spin_unlock_bh(&conn->session->lock);
1044 return -ENODATA;
1045 }
1046
1047 if (conn->task) {
1048 rc = iscsi_xmit_task(conn);
1049 if (rc)
1050 goto again;
1051 }
1052
1053 /*
1054 * process mgmt pdus like nops before commands since we should
1055 * only have one nop-out as a ping from us and targets should not
1056 * overflow us with nop-ins
1057 */
1058 check_mgmt:
1059 while (!list_empty(&conn->mgmtqueue)) {
1060 conn->task = list_entry(conn->mgmtqueue.next,
1061 struct iscsi_task, running);
1062 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1063 __iscsi_put_task(conn->task);
1064 conn->task = NULL;
1065 continue;
1066 }
1067 rc = iscsi_xmit_task(conn);
1068 if (rc)
1069 goto again;
1070 }
1071
1072 /* process pending command queue */
1073 while (!list_empty(&conn->xmitqueue)) {
1074 if (conn->tmf_state == TMF_QUEUED)
1075 break;
1076
1077 conn->task = list_entry(conn->xmitqueue.next,
1078 struct iscsi_task, running);
1079 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1080 fail_command(conn, conn->task, DID_IMM_RETRY << 16);
1081 continue;
1082 }
1083 if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
1084 fail_command(conn, conn->task, DID_ABORT << 16);
1085 continue;
1086 }
1087 rc = iscsi_xmit_task(conn);
1088 if (rc)
1089 goto again;
1090 /*
1091 * we could continuously get new task requests so
1092 * we need to check the mgmt queue for nops that need to
1093 * be sent to avoid starvation
1094 */
1095 if (!list_empty(&conn->mgmtqueue))
1096 goto check_mgmt;
1097 }
1098
1099 while (!list_empty(&conn->requeue)) {
1100 if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
1101 break;
1102
1103 /*
1104 * we always do fastlogout - conn stop code will clean up.
1105 */
1106 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1107 break;
1108
1109 conn->task = list_entry(conn->requeue.next,
1110 struct iscsi_task, running);
1111 conn->task->state = ISCSI_TASK_RUNNING;
1112 list_move_tail(conn->requeue.next, &conn->run_list);
1113 rc = iscsi_xmit_task(conn);
1114 if (rc)
1115 goto again;
1116 if (!list_empty(&conn->mgmtqueue))
1117 goto check_mgmt;
1118 }
1119 spin_unlock_bh(&conn->session->lock);
1120 return -ENODATA;
1121
1122 again:
1123 if (unlikely(conn->suspend_tx))
1124 rc = -ENODATA;
1125 spin_unlock_bh(&conn->session->lock);
1126 return rc;
1127 }
1128
1129 static void iscsi_xmitworker(struct work_struct *work)
1130 {
1131 struct iscsi_conn *conn =
1132 container_of(work, struct iscsi_conn, xmitwork);
1133 int rc;
1134 /*
1135 * serialize Xmit worker on a per-connection basis.
1136 */
1137 do {
1138 rc = iscsi_data_xmit(conn);
1139 } while (rc >= 0 || rc == -EAGAIN);
1140 }
1141
1142 enum {
1143 FAILURE_BAD_HOST = 1,
1144 FAILURE_SESSION_FAILED,
1145 FAILURE_SESSION_FREED,
1146 FAILURE_WINDOW_CLOSED,
1147 FAILURE_OOM,
1148 FAILURE_SESSION_TERMINATE,
1149 FAILURE_SESSION_IN_RECOVERY,
1150 FAILURE_SESSION_RECOVERY_TIMEOUT,
1151 FAILURE_SESSION_LOGGING_OUT,
1152 FAILURE_SESSION_NOT_READY,
1153 };
1154
1155 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1156 {
1157 struct iscsi_cls_session *cls_session;
1158 struct Scsi_Host *host;
1159 int reason = 0;
1160 struct iscsi_session *session;
1161 struct iscsi_conn *conn;
1162 struct iscsi_task *task = NULL;
1163
1164 sc->scsi_done = done;
1165 sc->result = 0;
1166 sc->SCp.ptr = NULL;
1167
1168 host = sc->device->host;
1169 spin_unlock(host->host_lock);
1170
1171 cls_session = starget_to_session(scsi_target(sc->device));
1172 session = cls_session->dd_data;
1173 spin_lock(&session->lock);
1174
1175 reason = iscsi_session_chkready(cls_session);
1176 if (reason) {
1177 sc->result = reason;
1178 goto fault;
1179 }
1180
1181 /*
1182 * ISCSI_STATE_FAILED is a temp. state. The recovery
1183 * code will decide what is best to do with commands queued
1184 * during this time.
1185 */
1186 if (session->state != ISCSI_STATE_LOGGED_IN &&
1187 session->state != ISCSI_STATE_FAILED) {
1188 /*
1189 * To handle the race between setting the recovery state and
1190 * blocking the session, we requeue here (commands could still be
1191 * entering our queuecommand while a block is starting up, because
1192 * the block code is not locked).
1193 */
1194 switch (session->state) {
1195 case ISCSI_STATE_IN_RECOVERY:
1196 reason = FAILURE_SESSION_IN_RECOVERY;
1197 sc->result = DID_IMM_RETRY << 16;
1198 break;
1199 case ISCSI_STATE_LOGGING_OUT:
1200 reason = FAILURE_SESSION_LOGGING_OUT;
1201 sc->result = DID_IMM_RETRY << 16;
1202 break;
1203 case ISCSI_STATE_RECOVERY_FAILED:
1204 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1205 sc->result = DID_NO_CONNECT << 16;
1206 break;
1207 case ISCSI_STATE_TERMINATE:
1208 reason = FAILURE_SESSION_TERMINATE;
1209 sc->result = DID_NO_CONNECT << 16;
1210 break;
1211 default:
1212 reason = FAILURE_SESSION_FREED;
1213 sc->result = DID_NO_CONNECT << 16;
1214 }
1215 goto fault;
1216 }
1217
1218 conn = session->leadconn;
1219 if (!conn) {
1220 reason = FAILURE_SESSION_FREED;
1221 sc->result = DID_NO_CONNECT << 16;
1222 goto fault;
1223 }
1224
1225 if (iscsi_check_cmdsn_window_closed(conn)) {
1226 reason = FAILURE_WINDOW_CLOSED;
1227 goto reject;
1228 }
1229
1230 if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
1231 sizeof(void*))) {
1232 reason = FAILURE_OOM;
1233 goto reject;
1234 }
1235 sc->SCp.phase = session->age;
1236 sc->SCp.ptr = (char *)task;
1237
1238 atomic_set(&task->refcount, 1);
1239 task->state = ISCSI_TASK_PENDING;
1240 task->conn = conn;
1241 task->sc = sc;
1242 INIT_LIST_HEAD(&task->running);
1243 list_add_tail(&task->running, &conn->xmitqueue);
1244
1245 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
1246 if (iscsi_prep_scsi_cmd_pdu(task)) {
1247 sc->result = DID_ABORT << 16;
1248 sc->scsi_done = NULL;
1249 iscsi_complete_command(task);
1250 goto fault;
1251 }
1252 if (session->tt->xmit_task(task)) {
1253 sc->scsi_done = NULL;
1254 iscsi_complete_command(task);
1255 reason = FAILURE_SESSION_NOT_READY;
1256 goto reject;
1257 }
1258 } else
1259 scsi_queue_work(session->host, &conn->xmitwork);
1260
1261 session->queued_cmdsn++;
1262 spin_unlock(&session->lock);
1263 spin_lock(host->host_lock);
1264 return 0;
1265
1266 reject:
1267 spin_unlock(&session->lock);
1268 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1269 spin_lock(host->host_lock);
1270 return SCSI_MLQUEUE_HOST_BUSY;
1271
1272 fault:
1273 spin_unlock(&session->lock);
1274 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
1275 if (!scsi_bidi_cmnd(sc))
1276 scsi_set_resid(sc, scsi_bufflen(sc));
1277 else {
1278 scsi_out(sc)->resid = scsi_out(sc)->length;
1279 scsi_in(sc)->resid = scsi_in(sc)->length;
1280 }
1281 done(sc);
1282 spin_lock(host->host_lock);
1283 return 0;
1284 }
1285 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1286
1287 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
1288 {
1289 if (depth > ISCSI_MAX_CMD_PER_LUN)
1290 depth = ISCSI_MAX_CMD_PER_LUN;
1291 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1292 return sdev->queue_depth;
1293 }
1294 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1295
1296 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1297 {
1298 struct iscsi_session *session = cls_session->dd_data;
1299
1300 spin_lock_bh(&session->lock);
1301 if (session->state != ISCSI_STATE_LOGGED_IN) {
1302 session->state = ISCSI_STATE_RECOVERY_FAILED;
1303 if (session->leadconn)
1304 wake_up(&session->leadconn->ehwait);
1305 }
1306 spin_unlock_bh(&session->lock);
1307 }
1308 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1309
1310 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
1311 {
1312 struct iscsi_cls_session *cls_session;
1313 struct iscsi_session *session;
1314 struct iscsi_conn *conn;
1315
1316 cls_session = starget_to_session(scsi_target(sc->device));
1317 session = cls_session->dd_data;
1318 conn = session->leadconn;
1319
1320 mutex_lock(&session->eh_mutex);
1321 spin_lock_bh(&session->lock);
1322 if (session->state == ISCSI_STATE_TERMINATE) {
1323 failed:
1324 debug_scsi("failing host reset: session terminated "
1325 "[CID %d age %d]\n", conn->id, session->age);
1326 spin_unlock_bh(&session->lock);
1327 mutex_unlock(&session->eh_mutex);
1328 return FAILED;
1329 }
1330
1331 spin_unlock_bh(&session->lock);
1332 mutex_unlock(&session->eh_mutex);
1333 /*
1334 * we drop the lock here but the leadconn cannot be destroyed while
1335 * we are in the scsi eh
1336 */
1337 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1338
1339 debug_scsi("iscsi_eh_host_reset wait for relogin\n");
1340 wait_event_interruptible(conn->ehwait,
1341 session->state == ISCSI_STATE_TERMINATE ||
1342 session->state == ISCSI_STATE_LOGGED_IN ||
1343 session->state == ISCSI_STATE_RECOVERY_FAILED);
1344 if (signal_pending(current))
1345 flush_signals(current);
1346
1347 mutex_lock(&session->eh_mutex);
1348 spin_lock_bh(&session->lock);
1349 if (session->state == ISCSI_STATE_LOGGED_IN)
1350 iscsi_session_printk(KERN_INFO, session,
1351 "host reset succeeded\n");
1352 else
1353 goto failed;
1354 spin_unlock_bh(&session->lock);
1355 mutex_unlock(&session->eh_mutex);
1356 return SUCCESS;
1357 }
1358 EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
1359
1360 static void iscsi_tmf_timedout(unsigned long data)
1361 {
1362 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1363 struct iscsi_session *session = conn->session;
1364
1365 spin_lock(&session->lock);
1366 if (conn->tmf_state == TMF_QUEUED) {
1367 conn->tmf_state = TMF_TIMEDOUT;
1368 debug_scsi("tmf timedout\n");
1369 /* unblock eh_abort() */
1370 wake_up(&conn->ehwait);
1371 }
1372 spin_unlock(&session->lock);
1373 }
1374
1375 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1376 struct iscsi_tm *hdr, int age,
1377 int timeout)
1378 {
1379 struct iscsi_session *session = conn->session;
1380 struct iscsi_task *task;
1381
1382 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1383 NULL, 0);
1384 if (!task) {
1385 spin_unlock_bh(&session->lock);
1386 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1387 spin_lock_bh(&session->lock);
1388 debug_scsi("tmf exec failure\n");
1389 return -EPERM;
1390 }
1391 conn->tmfcmd_pdus_cnt++;
1392 conn->tmf_timer.expires = timeout * HZ + jiffies;
1393 conn->tmf_timer.function = iscsi_tmf_timedout;
1394 conn->tmf_timer.data = (unsigned long)conn;
1395 add_timer(&conn->tmf_timer);
1396 debug_scsi("tmf set timeout\n");
1397
1398 spin_unlock_bh(&session->lock);
1399 mutex_unlock(&session->eh_mutex);
1400
1401 /*
1402 * block eh thread until:
1403 *
1404 * 1) tmf response
1405 * 2) tmf timeout
1406 * 3) session is terminated or restarted or userspace has
1407 * given up on recovery
1408 */
1409 wait_event_interruptible(conn->ehwait, age != session->age ||
1410 session->state != ISCSI_STATE_LOGGED_IN ||
1411 conn->tmf_state != TMF_QUEUED);
1412 if (signal_pending(current))
1413 flush_signals(current);
1414 del_timer_sync(&conn->tmf_timer);
1415
1416 mutex_lock(&session->eh_mutex);
1417 spin_lock_bh(&session->lock);
1418 /* if the session drops it will clean up the task */
1419 if (age != session->age ||
1420 session->state != ISCSI_STATE_LOGGED_IN)
1421 return -ENOTCONN;
1422 return 0;
1423 }
1424
1425 /*
1426 * Fail commands. The session lock must be held, with the recv side
1427 * suspended and the xmit thread flushed.
1428 */
1429 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1430 int error)
1431 {
1432 struct iscsi_task *task, *tmp;
1433
1434 if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
1435 conn->task = NULL;
1436
1437 /* flush pending */
1438 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
1439 if (lun == task->sc->device->lun || lun == -1) {
1440 debug_scsi("failing pending sc %p itt 0x%x\n",
1441 task->sc, task->itt);
1442 fail_command(conn, task, error << 16);
1443 }
1444 }
1445
1446 list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
1447 if (lun == task->sc->device->lun || lun == -1) {
1448 debug_scsi("failing requeued sc %p itt 0x%x\n",
1449 task->sc, task->itt);
1450 fail_command(conn, task, error << 16);
1451 }
1452 }
1453
1454 /* fail all other running */
1455 list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
1456 if (lun == task->sc->device->lun || lun == -1) {
1457 debug_scsi("failing in progress sc %p itt 0x%x\n",
1458 task->sc, task->itt);
1459 fail_command(conn, task, DID_BUS_BUSY << 16);
1460 }
1461 }
1462 }
1463
1464 void iscsi_suspend_tx(struct iscsi_conn *conn)
1465 {
1466 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1467 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1468 scsi_flush_work(conn->session->host);
1469 }
1470 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1471
1472 static void iscsi_start_tx(struct iscsi_conn *conn)
1473 {
1474 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1475 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1476 scsi_queue_work(conn->session->host, &conn->xmitwork);
1477 }
1478
1479 static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1480 {
1481 struct iscsi_cls_session *cls_session;
1482 struct iscsi_session *session;
1483 struct iscsi_conn *conn;
1484 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1485
1486 cls_session = starget_to_session(scsi_target(scmd->device));
1487 session = cls_session->dd_data;
1488
1489 debug_scsi("scsi cmd %p timedout\n", scmd);
1490
1491 spin_lock(&session->lock);
1492 if (session->state != ISCSI_STATE_LOGGED_IN) {
1493 /*
1494 * We are probably in the middle of iscsi recovery so let
1495 * that complete and handle the error.
1496 */
1497 rc = BLK_EH_RESET_TIMER;
1498 goto done;
1499 }
1500
1501 conn = session->leadconn;
1502 if (!conn) {
1503 /* In the middle of shutting down */
1504 rc = BLK_EH_RESET_TIMER;
1505 goto done;
1506 }
1507
1508 if (!conn->recv_timeout && !conn->ping_timeout)
1509 goto done;
1510 /*
1511 * if the ping timedout then we are in the middle of cleaning up
1512 * and can let the iscsi eh handle it
1513 */
1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1515 (conn->ping_timeout * HZ), jiffies))
1516 rc = BLK_EH_RESET_TIMER;
1517 /*
1518 * if we are about to check the transport then give the command
1519 * more time
1520 */
1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1522 jiffies))
1523 rc = BLK_EH_RESET_TIMER;
1524 /* if in the middle of checking the transport then give us more time */
1525 if (conn->ping_task)
1526 rc = BLK_EH_RESET_TIMER;
1527 done:
1528 spin_unlock(&session->lock);
1529 debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
1530 "timer reset" : "nh");
1531 return rc;
1532 }
1533
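/*
 * The timer below drives the nop-out "ping" keepalive. E.g. with
 * recv_timeout = 5 and ping_timeout = 5 (illustrative values, in
 * seconds): if nothing has been received for 5 seconds a nop-out is
 * sent, and if that ping is not answered within another 5 seconds the
 * connection is failed with ISCSI_ERR_CONN_FAILED.
 */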
1534 static void iscsi_check_transport_timeouts(unsigned long data)
1535 {
1536 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1537 struct iscsi_session *session = conn->session;
1538 unsigned long recv_timeout, next_timeout = 0, last_recv;
1539
1540 spin_lock(&session->lock);
1541 if (session->state != ISCSI_STATE_LOGGED_IN)
1542 goto done;
1543
1544 recv_timeout = conn->recv_timeout;
1545 if (!recv_timeout)
1546 goto done;
1547
1548 recv_timeout *= HZ;
1549 last_recv = conn->last_recv;
1550 if (conn->ping_task &&
1551 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
1552 jiffies)) {
1553 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1554 "expired, last rx %lu, last ping %lu, "
1555 "now %lu\n", conn->ping_timeout, last_recv,
1556 conn->last_ping, jiffies);
1557 spin_unlock(&session->lock);
1558 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1559 return;
1560 }
1561
1562 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
1563 /* send a ping to try to provoke some traffic */
1564 debug_scsi("Sending nopout as ping on conn %p\n", conn);
1565 iscsi_send_nopout(conn, NULL);
1566 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
1567 } else
1568 next_timeout = last_recv + recv_timeout;
1569
1570 debug_scsi("Setting next tmo %lu\n", next_timeout);
1571 mod_timer(&conn->transport_timer, next_timeout);
1572 done:
1573 spin_unlock(&session->lock);
1574 }
1575
1576 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
1577 struct iscsi_tm *hdr)
1578 {
1579 memset(hdr, 0, sizeof(*hdr));
1580 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1581 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1582 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1583 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1584 hdr->rtt = task->hdr->itt;
1585 hdr->refcmdsn = task->hdr->cmdsn;
1586 }
1587
1588 int iscsi_eh_abort(struct scsi_cmnd *sc)
1589 {
1590 struct iscsi_cls_session *cls_session;
1591 struct iscsi_session *session;
1592 struct iscsi_conn *conn;
1593 struct iscsi_task *task;
1594 struct iscsi_tm *hdr;
1595 int rc, age;
1596
1597 cls_session = starget_to_session(scsi_target(sc->device));
1598 session = cls_session->dd_data;
1599
1600 mutex_lock(&session->eh_mutex);
1601 spin_lock_bh(&session->lock);
1602 /*
1603 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
1604 * got the command.
1605 */
1606 if (!sc->SCp.ptr) {
1607 debug_scsi("sc never reached iscsi layer or it completed.\n");
1608 spin_unlock_bh(&session->lock);
1609 mutex_unlock(&session->eh_mutex);
1610 return SUCCESS;
1611 }
1612
1613 /*
1614 * If we are not logged in or we have started a new session
1615 * then let the host reset code handle this
1616 */
1617 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
1618 sc->SCp.phase != session->age) {
1619 spin_unlock_bh(&session->lock);
1620 mutex_unlock(&session->eh_mutex);
1621 return FAILED;
1622 }
1623
1624 conn = session->leadconn;
1625 conn->eh_abort_cnt++;
1626 age = session->age;
1627
1628 task = (struct iscsi_task *)sc->SCp.ptr;
1629 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
1630
1631 /* task completed before time out */
1632 if (!task->sc) {
1633 debug_scsi("sc completed while abort in progress\n");
1634 goto success;
1635 }
1636
1637 if (task->state == ISCSI_TASK_PENDING) {
1638 fail_command(conn, task, DID_ABORT << 16);
1639 goto success;
1640 }
1641
1642 /* only have one tmf outstanding at a time */
1643 if (conn->tmf_state != TMF_INITIAL)
1644 goto failed;
1645 conn->tmf_state = TMF_QUEUED;
1646
1647 hdr = &conn->tmhdr;
1648 iscsi_prep_abort_task_pdu(task, hdr);
1649
1650 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
1651 rc = FAILED;
1652 goto failed;
1653 }
1654
1655 switch (conn->tmf_state) {
1656 case TMF_SUCCESS:
1657 spin_unlock_bh(&session->lock);
1658 /*
1659 * stop tx side in case the target had sent an abort rsp but
1660 * the initiator was still writing out data.
1661 */
1662 iscsi_suspend_tx(conn);
1663 /*
1664 * we do not stop the recv side because targets have been
1665 * well behaved: none has ever sent us a successful tmf response
1666 * and then sent more data for the cmd.
1667 */
1668 spin_lock(&session->lock);
1669 fail_command(conn, task, DID_ABORT << 16);
1670 conn->tmf_state = TMF_INITIAL;
1671 spin_unlock(&session->lock);
1672 iscsi_start_tx(conn);
1673 goto success_unlocked;
1674 case TMF_TIMEDOUT:
1675 spin_unlock_bh(&session->lock);
1676 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1677 goto failed_unlocked;
1678 case TMF_NOT_FOUND:
1679 if (!sc->SCp.ptr) {
1680 conn->tmf_state = TMF_INITIAL;
1681 /* task completed before tmf abort response */
1682 debug_scsi("sc completed while abort in progress\n");
1683 goto success;
1684 }
1685 /* fall through */
1686 default:
1687 conn->tmf_state = TMF_INITIAL;
1688 goto failed;
1689 }
1690
1691 success:
1692 spin_unlock_bh(&session->lock);
1693 success_unlocked:
1694 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
1695 mutex_unlock(&session->eh_mutex);
1696 return SUCCESS;
1697
1698 failed:
1699 spin_unlock_bh(&session->lock);
1700 failed_unlocked:
1701 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
1702 task ? task->itt : 0);
1703 mutex_unlock(&session->eh_mutex);
1704 return FAILED;
1705 }
1706 EXPORT_SYMBOL_GPL(iscsi_eh_abort);
1707
1708 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
1709 {
1710 memset(hdr, 0, sizeof(*hdr));
1711 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1712 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
1713 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1714 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
1715 hdr->rtt = RESERVED_ITT;
1716 }
1717
1718 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1719 {
1720 struct iscsi_cls_session *cls_session;
1721 struct iscsi_session *session;
1722 struct iscsi_conn *conn;
1723 struct iscsi_tm *hdr;
1724 int rc = FAILED;
1725
1726 cls_session = starget_to_session(scsi_target(sc->device));
1727 session = cls_session->dd_data;
1728
1729 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1730
1731 mutex_lock(&session->eh_mutex);
1732 spin_lock_bh(&session->lock);
1733 /*
1734 * Just check if we are not logged in. We cannot check for
1735 * the phase because the reset could come from an ioctl.
1736 */
1737 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
1738 goto unlock;
1739 conn = session->leadconn;
1740
1741 /* only have one tmf outstanding at a time */
1742 if (conn->tmf_state != TMF_INITIAL)
1743 goto unlock;
1744 conn->tmf_state = TMF_QUEUED;
1745
1746 hdr = &conn->tmhdr;
1747 iscsi_prep_lun_reset_pdu(sc, hdr);
1748
1749 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
1750 session->lu_reset_timeout)) {
1751 rc = FAILED;
1752 goto unlock;
1753 }
1754
1755 switch (conn->tmf_state) {
1756 case TMF_SUCCESS:
1757 break;
1758 case TMF_TIMEDOUT:
1759 spin_unlock_bh(&session->lock);
1760 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1761 goto done;
1762 default:
1763 conn->tmf_state = TMF_INITIAL;
1764 goto unlock;
1765 }
1766
1767 rc = SUCCESS;
1768 spin_unlock_bh(&session->lock);
1769
1770 iscsi_suspend_tx(conn);
1771
1772 spin_lock(&session->lock);
1773 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1774 conn->tmf_state = TMF_INITIAL;
1775 spin_unlock(&session->lock);
1776
1777 iscsi_start_tx(conn);
1778 goto done;
1779
1780 unlock:
1781 spin_unlock_bh(&session->lock);
1782 done:
1783 debug_scsi("iscsi_eh_device_reset %s\n",
1784 rc == SUCCESS ? "SUCCESS" : "FAILED");
1785 mutex_unlock(&session->eh_mutex);
1786 return rc;
1787 }
1788 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
1789
1790 /*
1791 * Pre-allocate a pool of @max items of @item_size. By default, the pool
1792 * should be accessed via kfifo_{get,put} on q->queue.
1793 * Optionally, the caller can obtain the array of object pointers
1794 * by passing in a non-NULL @items pointer
1795 */
1796 int
1797 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1798 {
1799 int i, num_arrays = 1;
1800
1801 memset(q, 0, sizeof(*q));
1802
1803 q->max = max;
1804
1805 /* If the user passed an items pointer, he wants a copy of
1806 * the array. */
1807 if (items)
1808 num_arrays++;
1809 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1810 if (q->pool == NULL)
1811 goto enomem;
1812
1813 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1814 GFP_KERNEL, NULL);
1815 if (q->queue == ERR_PTR(-ENOMEM))
1816 goto enomem;
1817
1818 for (i = 0; i < max; i++) {
1819 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
1820 if (q->pool[i] == NULL) {
1821 q->max = i;
1822 goto enomem;
1823 }
1824 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
1825 }
1826
1827 if (items) {
1828 *items = q->pool + max;
1829 memcpy(*items, q->pool, max * sizeof(void *));
1830 }
1831
1832 return 0;
1833
1834 enomem:
1835 iscsi_pool_free(q);
1836 return -ENOMEM;
1837 }
1838 EXPORT_SYMBOL_GPL(iscsi_pool_init);
1839
1840 void iscsi_pool_free(struct iscsi_pool *q)
1841 {
1842 int i;
1843
1844 for (i = 0; i < q->max; i++)
1845 kfree(q->pool[i]);
1846 if (q->pool)
1847 kfree(q->pool);
1848 }
1849 EXPORT_SYMBOL_GPL(iscsi_pool_free);
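/*
 * Usage sketch for the pool helpers above (hypothetical driver-side
 * pool of struct foo, error handling omitted):
 *
 *	struct iscsi_pool pool;
 *	struct foo *f;
 *
 *	iscsi_pool_init(&pool, 128, NULL, sizeof(struct foo));
 *	__kfifo_get(pool.queue, (void *)&f, sizeof(void *));
 *	... use f ...
 *	__kfifo_put(pool.queue, (void *)&f, sizeof(void *));
 *	iscsi_pool_free(&pool);
 */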
1850
1851 /**
1852 * iscsi_host_add - add host to system
1853 * @shost: scsi host
1854 * @pdev: parent device
1855 *
1856 * This should be called by partial offload and software iscsi drivers
1857 * to add a host to the system.
1858 */
1859 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
1860 {
1861 if (!shost->can_queue)
1862 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
1863
1864 return scsi_add_host(shost, pdev);
1865 }
1866 EXPORT_SYMBOL_GPL(iscsi_host_add);
1867
1868 /**
1869 * iscsi_host_alloc - allocate a host and driver data
1870 * @sht: scsi host template
1871 * @dd_data_size: driver host data size
1872 * @qdepth: default device queue depth
1873 *
1874 * This should be called by partial offload and software iscsi drivers.
1875 * To access the driver specific memory use the iscsi_host_priv() macro.
1876 */
1877 struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
1878 int dd_data_size, uint16_t qdepth)
1879 {
1880 struct Scsi_Host *shost;
1881
1882 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
1883 if (!shost)
1884 return NULL;
1885 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1886
1887 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
1888 if (qdepth != 0)
1889 printk(KERN_ERR "iscsi: invalid queue depth of %d. "
1890 "Queue depth must be between 1 and %d.\n",
1891 qdepth, ISCSI_MAX_CMD_PER_LUN);
1892 qdepth = ISCSI_DEF_CMD_PER_LUN;
1893 }
1894 shost->cmd_per_lun = qdepth;
1895 return shost;
1896 }
1897 EXPORT_SYMBOL_GPL(iscsi_host_alloc);
1898
1899 /**
1900 * iscsi_host_remove - remove host and sessions
1901 * @shost: scsi host
1902 *
1903 * This will also remove any sessions attached to the host, but if userspace
1904 * is managing the session at the same time this will break. TODO: add
1905 * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
1906 * does not remove the memory from under us.
1907 */
1908 void iscsi_host_remove(struct Scsi_Host *shost)
1909 {
1910 iscsi_host_for_each_session(shost, iscsi_session_teardown);
1911 scsi_remove_host(shost);
1912 }
1913 EXPORT_SYMBOL_GPL(iscsi_host_remove);
1914
1915 void iscsi_host_free(struct Scsi_Host *shost)
1916 {
1917 struct iscsi_host *ihost = shost_priv(shost);
1918
1919 kfree(ihost->netdev);
1920 kfree(ihost->hwaddress);
1921 kfree(ihost->initiatorname);
1922 scsi_host_put(shost);
1923 }
1924 EXPORT_SYMBOL_GPL(iscsi_host_free);
1925
1926 /**
1927 * iscsi_session_setup - create iscsi cls session and iscsi session
1928 * @iscsit: iscsi transport template
1929 * @shost: scsi host
1930 * @cmds_max: total number of commands the session can queue
1931 * @cmd_task_size: LLD task private data size
1932 * @initial_cmdsn: initial CmdSN
1933 *
1934 * This can be used by software iscsi_transports that allocate
1935 * a session per scsi host.
1936 *
1937 * Callers should set cmds_max to the largest total number (mgmt + scsi) of
1938 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
1939 * for nop handling and login/logout requests.
1940 */
1941 struct iscsi_cls_session *
1942 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
1943 uint16_t cmds_max, int cmd_task_size,
1944 uint32_t initial_cmdsn, unsigned int id)
1945 {
1946 struct iscsi_session *session;
1947 struct iscsi_cls_session *cls_session;
1948 int cmd_i, scsi_cmds, total_cmds = cmds_max;
1949
1950 if (!total_cmds)
1951 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
1952 /*
1953 	 * The iscsi layer needs some tasks for nop handling and tmfs,
1954 	 * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus one
1955 	 * command reserved for scsi IO.
1956 */
1957 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
1958 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1959 "must be a power of two that is at least %d.\n",
1960 total_cmds, ISCSI_TOTAL_CMDS_MIN);
1961 return NULL;
1962 }
1963
1964 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
1965 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1966 "must be a power of 2 less than or equal to %d.\n",
1967 cmds_max, ISCSI_TOTAL_CMDS_MAX);
1968 total_cmds = ISCSI_TOTAL_CMDS_MAX;
1969 }
1970
1971 if (!is_power_of_2(total_cmds)) {
1972 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1973 "must be a power of 2.\n", total_cmds);
1974 total_cmds = rounddown_pow_of_two(total_cmds);
1975 if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
1976 return NULL;
1977 printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
1978 total_cmds);
1979 }
1980 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
1981
1982 cls_session = iscsi_alloc_session(shost, iscsit,
1983 sizeof(struct iscsi_session));
1984 if (!cls_session)
1985 return NULL;
1986 session = cls_session->dd_data;
1987 session->cls_session = cls_session;
1988 session->host = shost;
1989 session->state = ISCSI_STATE_FREE;
1990 session->fast_abort = 1;
1991 session->lu_reset_timeout = 15;
1992 session->abort_timeout = 10;
1993 session->scsi_cmds_max = scsi_cmds;
1994 session->cmds_max = total_cmds;
1995 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
1996 session->exp_cmdsn = initial_cmdsn + 1;
1997 session->max_cmdsn = initial_cmdsn + 1;
1998 session->max_r2t = 1;
1999 session->tt = iscsit;
2000 mutex_init(&session->eh_mutex);
2001 spin_lock_init(&session->lock);
2002
2003 /* initialize SCSI PDU commands pool */
2004 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
2005 (void***)&session->cmds,
2006 cmd_task_size + sizeof(struct iscsi_task)))
2007 goto cmdpool_alloc_fail;
2008
2009 /* pre-format cmds pool with ITT */
2010 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
2011 struct iscsi_task *task = session->cmds[cmd_i];
2012
2013 if (cmd_task_size)
2014 task->dd_data = &task[1];
2015 task->itt = cmd_i;
2016 INIT_LIST_HEAD(&task->running);
2017 }
2018
2019 if (!try_module_get(iscsit->owner))
2020 goto module_get_fail;
2021
2022 if (iscsi_add_session(cls_session, id))
2023 goto cls_session_fail;
2024 return cls_session;
2025
2026 cls_session_fail:
2027 module_put(iscsit->owner);
2028 module_get_fail:
2029 iscsi_pool_free(&session->cmdpool);
2030 cmdpool_alloc_fail:
2031 iscsi_free_session(cls_session);
2032 return NULL;
2033 }
2034 EXPORT_SYMBOL_GPL(iscsi_session_setup);
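/*
 * Usage sketch: called from a transport's create_session handler once the
 * scsi host has been added. my_lld_iscsi_transport and struct my_lld_task
 * are made-up names; the final 0 is the target id passed on to
 * iscsi_add_session().
 *
 *	cls_session = iscsi_session_setup(&my_lld_iscsi_transport, shost,
 *					  cmds_max, sizeof(struct my_lld_task),
 *					  initial_cmdsn, 0);
 *	if (!cls_session)
 *		goto remove_host;
 *	session = cls_session->dd_data;
 *
 * A driver may then trim shost->can_queue to session->scsi_cmds_max so the
 * midlayer never queues more SCSI commands than there are non-mgmt tasks.
 */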
2035
2036 /**
2037 * iscsi_session_teardown - destroy session, host, and cls_session
2038 * @cls_session: iscsi session
2039 *
2040 * The driver must have called iscsi_remove_session before
2041 * calling this.
2042 */
2043 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2044 {
2045 struct iscsi_session *session = cls_session->dd_data;
2046 struct module *owner = cls_session->transport->owner;
2047
2048 iscsi_pool_free(&session->cmdpool);
2049
2050 kfree(session->password);
2051 kfree(session->password_in);
2052 kfree(session->username);
2053 kfree(session->username_in);
2054 kfree(session->targetname);
2055 kfree(session->initiatorname);
2056 kfree(session->ifacename);
2057
2058 iscsi_destroy_session(cls_session);
2059 module_put(owner);
2060 }
2061 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
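/*
 * Usage sketch: one plausible shape for a transport's destroy_session
 * handler, unwinding the setup path in reverse. my_lld_session_destroy is
 * a made-up name, and the ordering of any driver-private per-session
 * cleanup is driver specific.
 *
 *	static void my_lld_session_destroy(struct iscsi_cls_session *cls_session)
 *	{
 *		struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 *
 *		... free driver-private per-session resources ...
 *		iscsi_session_teardown(cls_session);
 *		iscsi_host_remove(shost);
 *		iscsi_host_free(shost);
 *	}
 */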
2062
2063 /**
2064 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
2065 * @cls_session: iscsi_cls_session
2066 * @dd_size: private driver data size
2067 * @conn_idx: cid
2068 */
2069 struct iscsi_cls_conn *
2070 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2071 uint32_t conn_idx)
2072 {
2073 struct iscsi_session *session = cls_session->dd_data;
2074 struct iscsi_conn *conn;
2075 struct iscsi_cls_conn *cls_conn;
2076 char *data;
2077
2078 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
2079 conn_idx);
2080 if (!cls_conn)
2081 return NULL;
2082 conn = cls_conn->dd_data;
2083 memset(conn, 0, sizeof(*conn) + dd_size);
2084
2085 conn->dd_data = cls_conn->dd_data + sizeof(*conn);
2086 conn->session = session;
2087 conn->cls_conn = cls_conn;
2088 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2089 conn->id = conn_idx;
2090 conn->exp_statsn = 0;
2091 conn->tmf_state = TMF_INITIAL;
2092
2093 init_timer(&conn->transport_timer);
2094 conn->transport_timer.data = (unsigned long)conn;
2095 conn->transport_timer.function = iscsi_check_transport_timeouts;
2096
2097 INIT_LIST_HEAD(&conn->run_list);
2098 INIT_LIST_HEAD(&conn->mgmt_run_list);
2099 INIT_LIST_HEAD(&conn->mgmtqueue);
2100 INIT_LIST_HEAD(&conn->xmitqueue);
2101 INIT_LIST_HEAD(&conn->requeue);
2102 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2103
2104 /* allocate login_task used for the login/text sequences */
2105 spin_lock_bh(&session->lock);
2106 if (!__kfifo_get(session->cmdpool.queue,
2107 (void*)&conn->login_task,
2108 sizeof(void*))) {
2109 spin_unlock_bh(&session->lock);
2110 goto login_task_alloc_fail;
2111 }
2112 spin_unlock_bh(&session->lock);
2113
2114 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
2115 if (!data)
2116 goto login_task_data_alloc_fail;
2117 conn->login_task->data = conn->data = data;
2118
2119 init_timer(&conn->tmf_timer);
2120 init_waitqueue_head(&conn->ehwait);
2121
2122 return cls_conn;
2123
2124 login_task_data_alloc_fail:
2125 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2126 sizeof(void*));
2127 login_task_alloc_fail:
2128 iscsi_destroy_conn(cls_conn);
2129 return NULL;
2130 }
2131 EXPORT_SYMBOL_GPL(iscsi_conn_setup);
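/*
 * Usage sketch: dd_size carves driver-private space directly behind
 * struct iscsi_conn, reachable through conn->dd_data (the layout set up
 * above). struct my_lld_conn and its back-pointer field are illustrative
 * names only.
 *
 *	struct iscsi_cls_conn *cls_conn;
 *	struct iscsi_conn *conn;
 *	struct my_lld_conn *lld_conn;
 *
 *	cls_conn = iscsi_conn_setup(cls_session, sizeof(*lld_conn), conn_idx);
 *	if (!cls_conn)
 *		return NULL;
 *	conn = cls_conn->dd_data;
 *	lld_conn = conn->dd_data;
 *	lld_conn->iscsi_conn = conn;
 *	return cls_conn;
 */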
2132
2133 /**
2134 * iscsi_conn_teardown - teardown iscsi connection
2135 * @cls_conn: iscsi class connection
2136 *
2137 * TODO: we may need to make this into a two-step process,
2138 * like scsi-ml's remove + put host.
2139 */
2140 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2141 {
2142 struct iscsi_conn *conn = cls_conn->dd_data;
2143 struct iscsi_session *session = conn->session;
2144 unsigned long flags;
2145
2146 del_timer_sync(&conn->transport_timer);
2147
2148 spin_lock_bh(&session->lock);
2149 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2150 if (session->leadconn == conn) {
2151 /*
2152 * leading connection? then give up on recovery.
2153 */
2154 session->state = ISCSI_STATE_TERMINATE;
2155 wake_up(&conn->ehwait);
2156 }
2157 spin_unlock_bh(&session->lock);
2158
2159 /*
2160 * Block until all in-progress commands for this connection
2161 * time out or fail.
2162 */
2163 for (;;) {
2164 spin_lock_irqsave(session->host->host_lock, flags);
2165 if (!session->host->host_busy) { /* OK for ERL == 0 */
2166 spin_unlock_irqrestore(session->host->host_lock, flags);
2167 break;
2168 }
2169 spin_unlock_irqrestore(session->host->host_lock, flags);
2170 msleep_interruptible(500);
2171 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2172 "host_busy %d host_failed %d\n",
2173 session->host->host_busy,
2174 session->host->host_failed);
2175 /*
2176 * force eh_abort() to unblock
2177 */
2178 wake_up(&conn->ehwait);
2179 }
2180
2181 /* flush queued up work because we free the connection below */
2182 iscsi_suspend_tx(conn);
2183
2184 spin_lock_bh(&session->lock);
2185 kfree(conn->data);
2186 kfree(conn->persistent_address);
2187 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2188 sizeof(void*));
2189 if (session->leadconn == conn)
2190 session->leadconn = NULL;
2191 spin_unlock_bh(&session->lock);
2192
2193 iscsi_destroy_conn(cls_conn);
2194 }
2195 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
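/*
 * Usage sketch: a transport's destroy_conn handler usually releases its
 * own connection resources (socket, DMA buffers, ...) before letting
 * libiscsi drop the class connection. my_lld_conn_destroy and
 * my_lld_release_conn are made-up names.
 *
 *	static void my_lld_conn_destroy(struct iscsi_cls_conn *cls_conn)
 *	{
 *		struct iscsi_conn *conn = cls_conn->dd_data;
 *
 *		my_lld_release_conn(conn);
 *		iscsi_conn_teardown(cls_conn);
 *	}
 */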
2196
2197 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2198 {
2199 struct iscsi_conn *conn = cls_conn->dd_data;
2200 struct iscsi_session *session = conn->session;
2201
2202 if (!session) {
2203 iscsi_conn_printk(KERN_ERR, conn,
2204 "can't start unbound connection\n");
2205 return -EPERM;
2206 }
2207
2208 if ((session->imm_data_en || !session->initial_r2t_en) &&
2209 session->first_burst > session->max_burst) {
2210 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
2211 "first_burst %d max_burst %d\n",
2212 session->first_burst, session->max_burst);
2213 return -EINVAL;
2214 }
2215
2216 if (conn->ping_timeout && !conn->recv_timeout) {
2217 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
2218 "zero. Using 5 seconds\n.");
2219 conn->recv_timeout = 5;
2220 }
2221
2222 if (conn->recv_timeout && !conn->ping_timeout) {
2223 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
2224 "zero. Using 5 seconds.\n");
2225 conn->ping_timeout = 5;
2226 }
2227
2228 spin_lock_bh(&session->lock);
2229 conn->c_stage = ISCSI_CONN_STARTED;
2230 session->state = ISCSI_STATE_LOGGED_IN;
2231 session->queued_cmdsn = session->cmdsn;
2232
2233 conn->last_recv = jiffies;
2234 conn->last_ping = jiffies;
2235 if (conn->recv_timeout && conn->ping_timeout)
2236 mod_timer(&conn->transport_timer,
2237 jiffies + (conn->recv_timeout * HZ));
2238
2239 switch(conn->stop_stage) {
2240 case STOP_CONN_RECOVER:
2241 /*
2242 * unblock eh_abort() if it is blocked. re-try all
2243 * commands after successful recovery
2244 */
2245 conn->stop_stage = 0;
2246 conn->tmf_state = TMF_INITIAL;
2247 session->age++;
2248 if (session->age == 16)
2249 session->age = 0;
2250 break;
2251 case STOP_CONN_TERM:
2252 conn->stop_stage = 0;
2253 break;
2254 default:
2255 break;
2256 }
2257 spin_unlock_bh(&session->lock);
2258
2259 iscsi_unblock_session(session->cls_session);
2260 wake_up(&conn->ehwait);
2261 return 0;
2262 }
2263 EXPORT_SYMBOL_GPL(iscsi_conn_start);
2264
2265 static void
2266 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
2267 {
2268 struct iscsi_task *task, *tmp;
2269
2270 /* handle pending */
2271 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
2272 debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
2273 /* release ref from prep task */
2274 __iscsi_put_task(task);
2275 }
2276
2277 /* handle running */
2278 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
2279 debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
2280 /* release ref from prep task */
2281 __iscsi_put_task(task);
2282 }
2283
2284 conn->task = NULL;
2285 }
2286
2287 static void iscsi_start_session_recovery(struct iscsi_session *session,
2288 struct iscsi_conn *conn, int flag)
2289 {
2290 int old_stop_stage;
2291
2292 del_timer_sync(&conn->transport_timer);
2293
2294 mutex_lock(&session->eh_mutex);
2295 spin_lock_bh(&session->lock);
2296 if (conn->stop_stage == STOP_CONN_TERM) {
2297 spin_unlock_bh(&session->lock);
2298 mutex_unlock(&session->eh_mutex);
2299 return;
2300 }
2301
2302 /*
2303 * When this is called for the in_login state, we only want to clean
2304 * up the login task and connection. We do not need to block and set
2305 * the recovery state again.
2306 */
2307 if (flag == STOP_CONN_TERM)
2308 session->state = ISCSI_STATE_TERMINATE;
2309 else if (conn->stop_stage != STOP_CONN_RECOVER)
2310 session->state = ISCSI_STATE_IN_RECOVERY;
2311
2312 old_stop_stage = conn->stop_stage;
2313 conn->stop_stage = flag;
2314 conn->c_stage = ISCSI_CONN_STOPPED;
2315 spin_unlock_bh(&session->lock);
2316
2317 iscsi_suspend_tx(conn);
2318 /*
2319 * for connection level recovery we should not calculate
2320 * header digest. conn->hdr_size used for optimization
2321 * in hdr_extract() and will be re-negotiated at
2322 * set_param() time.
2323 */
2324 if (flag == STOP_CONN_RECOVER) {
2325 conn->hdrdgst_en = 0;
2326 conn->datadgst_en = 0;
2327 if (session->state == ISCSI_STATE_IN_RECOVERY &&
2328 old_stop_stage != STOP_CONN_RECOVER) {
2329 debug_scsi("blocking session\n");
2330 iscsi_block_session(session->cls_session);
2331 }
2332 }
2333
2334 /*
2335 * flush queues.
2336 */
2337 spin_lock_bh(&session->lock);
2338 fail_all_commands(conn, -1,
2339 flag == STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
2340 flush_control_queues(session, conn);
2341 spin_unlock_bh(&session->lock);
2342 mutex_unlock(&session->eh_mutex);
2343 }
2344
2345 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2346 {
2347 struct iscsi_conn *conn = cls_conn->dd_data;
2348 struct iscsi_session *session = conn->session;
2349
2350 switch (flag) {
2351 case STOP_CONN_RECOVER:
2352 case STOP_CONN_TERM:
2353 iscsi_start_session_recovery(session, conn, flag);
2354 break;
2355 default:
2356 iscsi_conn_printk(KERN_ERR, conn,
2357 "invalid stop flag %d\n", flag);
2358 }
2359 }
2360 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
2361
2362 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2363 struct iscsi_cls_conn *cls_conn, int is_leading)
2364 {
2365 struct iscsi_session *session = cls_session->dd_data;
2366 struct iscsi_conn *conn = cls_conn->dd_data;
2367
2368 spin_lock_bh(&session->lock);
2369 if (is_leading)
2370 session->leadconn = conn;
2371 spin_unlock_bh(&session->lock);
2372
2373 /*
2374 * Unblock xmitworker() so the Login Phase can pass through.
2375 */
2376 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2377 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2378 return 0;
2379 }
2380 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
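/*
 * Usage sketch: drivers normally call this from their bind_conn handler,
 * letting libiscsi record the leading connection and clear the suspend
 * bits before the transport-specific endpoint (socket, hw queue, ...) is
 * attached. my_lld_conn_bind is a made-up name; the prototype shown is
 * the bind_conn callback of struct iscsi_transport as declared for this
 * kernel generation (check scsi_transport_iscsi.h).
 *
 *	static int my_lld_conn_bind(struct iscsi_cls_session *cls_session,
 *				    struct iscsi_cls_conn *cls_conn,
 *				    uint64_t transport_eph, int is_leading)
 *	{
 *		int err;
 *
 *		err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
 *		if (err)
 *			return err;
 *		... attach the endpoint named by transport_eph ...
 *		return 0;
 *	}
 *
 * Simple drivers can generally point .start_conn and .stop_conn in their
 * iscsi_transport straight at iscsi_conn_start() and iscsi_conn_stop().
 */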
2381
2382
2383 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2384 enum iscsi_param param, char *buf, int buflen)
2385 {
2386 struct iscsi_conn *conn = cls_conn->dd_data;
2387 struct iscsi_session *session = conn->session;
2388 uint32_t value;
2389
2390 switch(param) {
2391 case ISCSI_PARAM_FAST_ABORT:
2392 sscanf(buf, "%d", &session->fast_abort);
2393 break;
2394 case ISCSI_PARAM_ABORT_TMO:
2395 sscanf(buf, "%d", &session->abort_timeout);
2396 break;
2397 case ISCSI_PARAM_LU_RESET_TMO:
2398 sscanf(buf, "%d", &session->lu_reset_timeout);
2399 break;
2400 case ISCSI_PARAM_PING_TMO:
2401 sscanf(buf, "%d", &conn->ping_timeout);
2402 break;
2403 case ISCSI_PARAM_RECV_TMO:
2404 sscanf(buf, "%d", &conn->recv_timeout);
2405 break;
2406 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2407 sscanf(buf, "%d", &conn->max_recv_dlength);
2408 break;
2409 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2410 sscanf(buf, "%d", &conn->max_xmit_dlength);
2411 break;
2412 case ISCSI_PARAM_HDRDGST_EN:
2413 sscanf(buf, "%d", &conn->hdrdgst_en);
2414 break;
2415 case ISCSI_PARAM_DATADGST_EN:
2416 sscanf(buf, "%d", &conn->datadgst_en);
2417 break;
2418 case ISCSI_PARAM_INITIAL_R2T_EN:
2419 sscanf(buf, "%d", &session->initial_r2t_en);
2420 break;
2421 case ISCSI_PARAM_MAX_R2T:
2422 sscanf(buf, "%d", &session->max_r2t);
2423 break;
2424 case ISCSI_PARAM_IMM_DATA_EN:
2425 sscanf(buf, "%d", &session->imm_data_en);
2426 break;
2427 case ISCSI_PARAM_FIRST_BURST:
2428 sscanf(buf, "%d", &session->first_burst);
2429 break;
2430 case ISCSI_PARAM_MAX_BURST:
2431 sscanf(buf, "%d", &session->max_burst);
2432 break;
2433 case ISCSI_PARAM_PDU_INORDER_EN:
2434 sscanf(buf, "%d", &session->pdu_inorder_en);
2435 break;
2436 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2437 sscanf(buf, "%d", &session->dataseq_inorder_en);
2438 break;
2439 case ISCSI_PARAM_ERL:
2440 sscanf(buf, "%d", &session->erl);
2441 break;
2442 case ISCSI_PARAM_IFMARKER_EN:
2443 sscanf(buf, "%d", &value);
2444 BUG_ON(value);
2445 break;
2446 case ISCSI_PARAM_OFMARKER_EN:
2447 sscanf(buf, "%d", &value);
2448 BUG_ON(value);
2449 break;
2450 case ISCSI_PARAM_EXP_STATSN:
2451 sscanf(buf, "%u", &conn->exp_statsn);
2452 break;
2453 case ISCSI_PARAM_USERNAME:
2454 kfree(session->username);
2455 session->username = kstrdup(buf, GFP_KERNEL);
2456 if (!session->username)
2457 return -ENOMEM;
2458 break;
2459 case ISCSI_PARAM_USERNAME_IN:
2460 kfree(session->username_in);
2461 session->username_in = kstrdup(buf, GFP_KERNEL);
2462 if (!session->username_in)
2463 return -ENOMEM;
2464 break;
2465 case ISCSI_PARAM_PASSWORD:
2466 kfree(session->password);
2467 session->password = kstrdup(buf, GFP_KERNEL);
2468 if (!session->password)
2469 return -ENOMEM;
2470 break;
2471 case ISCSI_PARAM_PASSWORD_IN:
2472 kfree(session->password_in);
2473 session->password_in = kstrdup(buf, GFP_KERNEL);
2474 if (!session->password_in)
2475 return -ENOMEM;
2476 break;
2477 case ISCSI_PARAM_TARGET_NAME:
2478 /* this should not change between logins */
2479 if (session->targetname)
2480 break;
2481
2482 session->targetname = kstrdup(buf, GFP_KERNEL);
2483 if (!session->targetname)
2484 return -ENOMEM;
2485 break;
2486 case ISCSI_PARAM_TPGT:
2487 sscanf(buf, "%d", &session->tpgt);
2488 break;
2489 case ISCSI_PARAM_PERSISTENT_PORT:
2490 sscanf(buf, "%d", &conn->persistent_port);
2491 break;
2492 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2493 /*
2494 * this is the address returned in discovery so it should
2495 * not change between logins.
2496 */
2497 if (conn->persistent_address)
2498 break;
2499
2500 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2501 if (!conn->persistent_address)
2502 return -ENOMEM;
2503 break;
2504 case ISCSI_PARAM_IFACE_NAME:
2505 if (!session->ifacename)
2506 session->ifacename = kstrdup(buf, GFP_KERNEL);
2507 break;
2508 case ISCSI_PARAM_INITIATOR_NAME:
2509 if (!session->initiatorname)
2510 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2511 break;
2512 default:
2513 return -ENOSYS;
2514 }
2515
2516 return 0;
2517 }
2518 EXPORT_SYMBOL_GPL(iscsi_set_param);
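/*
 * Usage sketch: a driver that must react to a parameter (for example
 * resizing its receive buffers when MaxRecvDataSegmentLength changes) can
 * wrap this helper and fall back to it for everything else.
 * my_lld_conn_set_param and my_lld_set_recv_dlength are made-up names.
 *
 *	static int my_lld_conn_set_param(struct iscsi_cls_conn *cls_conn,
 *					 enum iscsi_param param,
 *					 char *buf, int buflen)
 *	{
 *		struct iscsi_conn *conn = cls_conn->dd_data;
 *
 *		switch (param) {
 *		case ISCSI_PARAM_MAX_RECV_DLENGTH:
 *			iscsi_set_param(cls_conn, param, buf, buflen);
 *			return my_lld_set_recv_dlength(conn);
 *		default:
 *			return iscsi_set_param(cls_conn, param, buf, buflen);
 *		}
 *	}
 */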
2519
2520 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2521 enum iscsi_param param, char *buf)
2522 {
2523 struct iscsi_session *session = cls_session->dd_data;
2524 int len;
2525
2526 switch(param) {
2527 case ISCSI_PARAM_FAST_ABORT:
2528 len = sprintf(buf, "%d\n", session->fast_abort);
2529 break;
2530 case ISCSI_PARAM_ABORT_TMO:
2531 len = sprintf(buf, "%d\n", session->abort_timeout);
2532 break;
2533 case ISCSI_PARAM_LU_RESET_TMO:
2534 len = sprintf(buf, "%d\n", session->lu_reset_timeout);
2535 break;
2536 case ISCSI_PARAM_INITIAL_R2T_EN:
2537 len = sprintf(buf, "%d\n", session->initial_r2t_en);
2538 break;
2539 case ISCSI_PARAM_MAX_R2T:
2540 len = sprintf(buf, "%hu\n", session->max_r2t);
2541 break;
2542 case ISCSI_PARAM_IMM_DATA_EN:
2543 len = sprintf(buf, "%d\n", session->imm_data_en);
2544 break;
2545 case ISCSI_PARAM_FIRST_BURST:
2546 len = sprintf(buf, "%u\n", session->first_burst);
2547 break;
2548 case ISCSI_PARAM_MAX_BURST:
2549 len = sprintf(buf, "%u\n", session->max_burst);
2550 break;
2551 case ISCSI_PARAM_PDU_INORDER_EN:
2552 len = sprintf(buf, "%d\n", session->pdu_inorder_en);
2553 break;
2554 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2555 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
2556 break;
2557 case ISCSI_PARAM_ERL:
2558 len = sprintf(buf, "%d\n", session->erl);
2559 break;
2560 case ISCSI_PARAM_TARGET_NAME:
2561 len = sprintf(buf, "%s\n", session->targetname);
2562 break;
2563 case ISCSI_PARAM_TPGT:
2564 len = sprintf(buf, "%d\n", session->tpgt);
2565 break;
2566 case ISCSI_PARAM_USERNAME:
2567 len = sprintf(buf, "%s\n", session->username);
2568 break;
2569 case ISCSI_PARAM_USERNAME_IN:
2570 len = sprintf(buf, "%s\n", session->username_in);
2571 break;
2572 case ISCSI_PARAM_PASSWORD:
2573 len = sprintf(buf, "%s\n", session->password);
2574 break;
2575 case ISCSI_PARAM_PASSWORD_IN:
2576 len = sprintf(buf, "%s\n", session->password_in);
2577 break;
2578 case ISCSI_PARAM_IFACE_NAME:
2579 len = sprintf(buf, "%s\n", session->ifacename);
2580 break;
2581 case ISCSI_PARAM_INITIATOR_NAME:
2582 if (!session->initiatorname)
2583 len = sprintf(buf, "%s\n", "unknown");
2584 else
2585 len = sprintf(buf, "%s\n", session->initiatorname);
2586 break;
2587 default:
2588 return -ENOSYS;
2589 }
2590
2591 return len;
2592 }
2593 EXPORT_SYMBOL_GPL(iscsi_session_get_param);
2594
2595 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
2596 enum iscsi_param param, char *buf)
2597 {
2598 struct iscsi_conn *conn = cls_conn->dd_data;
2599 int len;
2600
2601 switch(param) {
2602 case ISCSI_PARAM_PING_TMO:
2603 len = sprintf(buf, "%u\n", conn->ping_timeout);
2604 break;
2605 case ISCSI_PARAM_RECV_TMO:
2606 len = sprintf(buf, "%u\n", conn->recv_timeout);
2607 break;
2608 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2609 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
2610 break;
2611 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2612 len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
2613 break;
2614 case ISCSI_PARAM_HDRDGST_EN:
2615 len = sprintf(buf, "%d\n", conn->hdrdgst_en);
2616 break;
2617 case ISCSI_PARAM_DATADGST_EN:
2618 len = sprintf(buf, "%d\n", conn->datadgst_en);
2619 break;
2620 case ISCSI_PARAM_IFMARKER_EN:
2621 len = sprintf(buf, "%d\n", conn->ifmarker_en);
2622 break;
2623 case ISCSI_PARAM_OFMARKER_EN:
2624 len = sprintf(buf, "%d\n", conn->ofmarker_en);
2625 break;
2626 case ISCSI_PARAM_EXP_STATSN:
2627 len = sprintf(buf, "%u\n", conn->exp_statsn);
2628 break;
2629 case ISCSI_PARAM_PERSISTENT_PORT:
2630 len = sprintf(buf, "%d\n", conn->persistent_port);
2631 break;
2632 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2633 len = sprintf(buf, "%s\n", conn->persistent_address);
2634 break;
2635 default:
2636 return -ENOSYS;
2637 }
2638
2639 return len;
2640 }
2641 EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
2642
2643 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2644 char *buf)
2645 {
2646 struct iscsi_host *ihost = shost_priv(shost);
2647 int len;
2648
2649 switch (param) {
2650 case ISCSI_HOST_PARAM_NETDEV_NAME:
2651 if (!ihost->netdev)
2652 len = sprintf(buf, "%s\n", "default");
2653 else
2654 len = sprintf(buf, "%s\n", ihost->netdev);
2655 break;
2656 case ISCSI_HOST_PARAM_HWADDRESS:
2657 if (!ihost->hwaddress)
2658 len = sprintf(buf, "%s\n", "default");
2659 else
2660 len = sprintf(buf, "%s\n", ihost->hwaddress);
2661 break;
2662 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2663 if (!ihost->initiatorname)
2664 len = sprintf(buf, "%s\n", "unknown");
2665 else
2666 len = sprintf(buf, "%s\n", ihost->initiatorname);
2667 break;
2668 case ISCSI_HOST_PARAM_IPADDRESS:
2669 if (!strlen(ihost->local_address))
2670 len = sprintf(buf, "%s\n", "unknown");
2671 else
2672 len = sprintf(buf, "%s\n",
2673 ihost->local_address);
2674 break;
2675 default:
2676 return -ENOSYS;
2677 }
2678
2679 return len;
2680 }
2681 EXPORT_SYMBOL_GPL(iscsi_host_get_param);
2682
2683 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2684 char *buf, int buflen)
2685 {
2686 struct iscsi_host *ihost = shost_priv(shost);
2687
2688 switch (param) {
2689 case ISCSI_HOST_PARAM_NETDEV_NAME:
2690 if (!ihost->netdev)
2691 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2692 break;
2693 case ISCSI_HOST_PARAM_HWADDRESS:
2694 if (!ihost->hwaddress)
2695 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2696 break;
2697 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2698 if (!ihost->initiatorname)
2699 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2700 break;
2701 default:
2702 return -ENOSYS;
2703 }
2704
2705 return 0;
2706 }
2707 EXPORT_SYMBOL_GPL(iscsi_host_set_param);
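/*
 * Usage sketch: a driver with no transport-specific host attributes can
 * point the host-parameter hooks of its iscsi_transport straight at the
 * two helpers above, e.g.
 *
 *	.get_host_param	= iscsi_host_get_param,
 *	.set_host_param	= iscsi_host_set_param,
 *
 * while a driver that derives values at runtime (reading the address off
 * a socket or HBA) wraps them and defers to the library for the rest.
 */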
2708
2709 MODULE_AUTHOR("Mike Christie");
2710 MODULE_DESCRIPTION("iSCSI library functions");
2711 MODULE_LICENSE("GPL");