1 /*******************************************************************************
2 * This file contains the iSCSI Target specific utility functions.
4 * (c) Copyright 2007-2013 Datera, Inc.
6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ******************************************************************************/
19 #include <linux/list.h>
20 #include <linux/percpu_ida.h>
21 #include <net/ipv6.h> /* ipv6_addr_equal() */
22 #include <scsi/scsi_tcq.h>
23 #include <scsi/iscsi_proto.h>
24 #include <target/target_core_base.h>
25 #include <target/target_core_fabric.h>
26 #include <target/iscsi/iscsi_transport.h>
28 #include <target/iscsi/iscsi_target_core.h>
29 #include "iscsi_target_parameters.h"
30 #include "iscsi_target_seq_pdu_list.h"
31 #include "iscsi_target_datain_values.h"
32 #include "iscsi_target_erl0.h"
33 #include "iscsi_target_erl1.h"
34 #include "iscsi_target_erl2.h"
35 #include "iscsi_target_tpg.h"
36 #include "iscsi_target_util.h"
37 #include "iscsi_target.h"
/* Debug helper: hex-dump `len` bytes of `buff` in rows of 16 via pr_debug(). */
#define PRINT_BUFF(buff, len)					\
{								\
	int zzz;						\
								\
	pr_debug("%d:\n", __LINE__);				\
	for (zzz = 0; zzz < len; zzz++) {			\
		if (zzz % 16 == 0) {				\
			if (zzz)				\
				pr_debug("\n");			\
			pr_debug("%4i: ", zzz);			\
		}						\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
	}							\
	if ((len + 1) % 16)					\
		pr_debug("\n");					\
}
56 extern struct list_head g_tiqn_list
;
57 extern spinlock_t tiqn_lock
;
60 * Called with cmd->r2t_lock held.
62 int iscsit_add_r2t_to_list(
63 struct iscsi_cmd
*cmd
,
69 struct iscsi_r2t
*r2t
;
71 r2t
= kmem_cache_zalloc(lio_r2t_cache
, GFP_ATOMIC
);
73 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
76 INIT_LIST_HEAD(&r2t
->r2t_list
);
78 r2t
->recovery_r2t
= recovery
;
79 r2t
->r2t_sn
= (!r2t_sn
) ? cmd
->r2t_sn
++ : r2t_sn
;
81 r2t
->xfer_len
= xfer_len
;
82 list_add_tail(&r2t
->r2t_list
, &cmd
->cmd_r2t_list
);
83 spin_unlock_bh(&cmd
->r2t_lock
);
85 iscsit_add_cmd_to_immediate_queue(cmd
, cmd
->conn
, ISTATE_SEND_R2T
);
87 spin_lock_bh(&cmd
->r2t_lock
);
91 struct iscsi_r2t
*iscsit_get_r2t_for_eos(
92 struct iscsi_cmd
*cmd
,
96 struct iscsi_r2t
*r2t
;
98 spin_lock_bh(&cmd
->r2t_lock
);
99 list_for_each_entry(r2t
, &cmd
->cmd_r2t_list
, r2t_list
) {
100 if ((r2t
->offset
<= offset
) &&
101 (r2t
->offset
+ r2t
->xfer_len
) >= (offset
+ length
)) {
102 spin_unlock_bh(&cmd
->r2t_lock
);
106 spin_unlock_bh(&cmd
->r2t_lock
);
108 pr_err("Unable to locate R2T for Offset: %u, Length:"
109 " %u\n", offset
, length
);
113 struct iscsi_r2t
*iscsit_get_r2t_from_list(struct iscsi_cmd
*cmd
)
115 struct iscsi_r2t
*r2t
;
117 spin_lock_bh(&cmd
->r2t_lock
);
118 list_for_each_entry(r2t
, &cmd
->cmd_r2t_list
, r2t_list
) {
119 if (!r2t
->sent_r2t
) {
120 spin_unlock_bh(&cmd
->r2t_lock
);
124 spin_unlock_bh(&cmd
->r2t_lock
);
126 pr_err("Unable to locate next R2T to send for ITT:"
127 " 0x%08x.\n", cmd
->init_task_tag
);
132 * Called with cmd->r2t_lock held.
134 void iscsit_free_r2t(struct iscsi_r2t
*r2t
, struct iscsi_cmd
*cmd
)
136 list_del(&r2t
->r2t_list
);
137 kmem_cache_free(lio_r2t_cache
, r2t
);
140 void iscsit_free_r2ts_from_list(struct iscsi_cmd
*cmd
)
142 struct iscsi_r2t
*r2t
, *r2t_tmp
;
144 spin_lock_bh(&cmd
->r2t_lock
);
145 list_for_each_entry_safe(r2t
, r2t_tmp
, &cmd
->cmd_r2t_list
, r2t_list
)
146 iscsit_free_r2t(r2t
, cmd
);
147 spin_unlock_bh(&cmd
->r2t_lock
);
151 * May be called from software interrupt (timer) context for allocating
154 struct iscsi_cmd
*iscsit_allocate_cmd(struct iscsi_conn
*conn
, int state
)
156 struct iscsi_cmd
*cmd
;
157 struct se_session
*se_sess
= conn
->sess
->se_sess
;
160 tag
= percpu_ida_alloc(&se_sess
->sess_tag_pool
, state
);
164 size
= sizeof(struct iscsi_cmd
) + conn
->conn_transport
->priv_size
;
165 cmd
= (struct iscsi_cmd
*)(se_sess
->sess_cmd_map
+ (tag
* size
));
166 memset(cmd
, 0, size
);
168 cmd
->se_cmd
.map_tag
= tag
;
170 INIT_LIST_HEAD(&cmd
->i_conn_node
);
171 INIT_LIST_HEAD(&cmd
->datain_list
);
172 INIT_LIST_HEAD(&cmd
->cmd_r2t_list
);
173 spin_lock_init(&cmd
->datain_lock
);
174 spin_lock_init(&cmd
->dataout_timeout_lock
);
175 spin_lock_init(&cmd
->istate_lock
);
176 spin_lock_init(&cmd
->error_lock
);
177 spin_lock_init(&cmd
->r2t_lock
);
181 EXPORT_SYMBOL(iscsit_allocate_cmd
);
183 struct iscsi_seq
*iscsit_get_seq_holder_for_datain(
184 struct iscsi_cmd
*cmd
,
189 for (i
= 0; i
< cmd
->seq_count
; i
++)
190 if (cmd
->seq_list
[i
].seq_send_order
== seq_send_order
)
191 return &cmd
->seq_list
[i
];
196 struct iscsi_seq
*iscsit_get_seq_holder_for_r2t(struct iscsi_cmd
*cmd
)
200 if (!cmd
->seq_list
) {
201 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
205 for (i
= 0; i
< cmd
->seq_count
; i
++) {
206 if (cmd
->seq_list
[i
].type
!= SEQTYPE_NORMAL
)
208 if (cmd
->seq_list
[i
].seq_send_order
== cmd
->seq_send_order
) {
209 cmd
->seq_send_order
++;
210 return &cmd
->seq_list
[i
];
217 struct iscsi_r2t
*iscsit_get_holder_for_r2tsn(
218 struct iscsi_cmd
*cmd
,
221 struct iscsi_r2t
*r2t
;
223 spin_lock_bh(&cmd
->r2t_lock
);
224 list_for_each_entry(r2t
, &cmd
->cmd_r2t_list
, r2t_list
) {
225 if (r2t
->r2t_sn
== r2t_sn
) {
226 spin_unlock_bh(&cmd
->r2t_lock
);
230 spin_unlock_bh(&cmd
->r2t_lock
);
235 static inline int iscsit_check_received_cmdsn(struct iscsi_session
*sess
, u32 cmdsn
)
241 * This is the proper method of checking received CmdSN against
242 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
243 * or order CmdSNs due to multiple connection sessions and/or
246 max_cmdsn
= atomic_read(&sess
->max_cmd_sn
);
247 if (iscsi_sna_gt(cmdsn
, max_cmdsn
)) {
248 pr_err("Received CmdSN: 0x%08x is greater than"
249 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn
, max_cmdsn
);
250 ret
= CMDSN_MAXCMDSN_OVERRUN
;
252 } else if (cmdsn
== sess
->exp_cmd_sn
) {
254 pr_debug("Received CmdSN matches ExpCmdSN,"
255 " incremented ExpCmdSN to: 0x%08x\n",
257 ret
= CMDSN_NORMAL_OPERATION
;
259 } else if (iscsi_sna_gt(cmdsn
, sess
->exp_cmd_sn
)) {
260 pr_debug("Received CmdSN: 0x%08x is greater"
261 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
262 cmdsn
, sess
->exp_cmd_sn
);
263 ret
= CMDSN_HIGHER_THAN_EXP
;
266 pr_err("Received CmdSN: 0x%08x is less than"
267 " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn
,
269 ret
= CMDSN_LOWER_THAN_EXP
;
276 * Commands may be received out of order if MC/S is in use.
277 * Ensure they are executed in CmdSN order.
279 int iscsit_sequence_cmd(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
,
280 unsigned char *buf
, __be32 cmdsn
)
284 u8 reason
= ISCSI_REASON_BOOKMARK_NO_RESOURCES
;
286 mutex_lock(&conn
->sess
->cmdsn_mutex
);
288 cmdsn_ret
= iscsit_check_received_cmdsn(conn
->sess
, be32_to_cpu(cmdsn
));
290 case CMDSN_NORMAL_OPERATION
:
291 ret
= iscsit_execute_cmd(cmd
, 0);
292 if ((ret
>= 0) && !list_empty(&conn
->sess
->sess_ooo_cmdsn_list
))
293 iscsit_execute_ooo_cmdsns(conn
->sess
);
296 ret
= CMDSN_ERROR_CANNOT_RECOVER
;
299 case CMDSN_HIGHER_THAN_EXP
:
300 ret
= iscsit_handle_ooo_cmdsn(conn
->sess
, cmd
, be32_to_cpu(cmdsn
));
303 ret
= CMDSN_ERROR_CANNOT_RECOVER
;
306 ret
= CMDSN_HIGHER_THAN_EXP
;
308 case CMDSN_LOWER_THAN_EXP
:
309 case CMDSN_MAXCMDSN_OVERRUN
:
311 cmd
->i_state
= ISTATE_REMOVE
;
312 iscsit_add_cmd_to_immediate_queue(cmd
, conn
, cmd
->i_state
);
314 * Existing callers for iscsit_sequence_cmd() will silently
315 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
316 * return for CMDSN_MAXCMDSN_OVERRUN as well..
318 ret
= CMDSN_LOWER_THAN_EXP
;
321 mutex_unlock(&conn
->sess
->cmdsn_mutex
);
324 iscsit_reject_cmd(cmd
, reason
, buf
);
328 EXPORT_SYMBOL(iscsit_sequence_cmd
);
330 int iscsit_check_unsolicited_dataout(struct iscsi_cmd
*cmd
, unsigned char *buf
)
332 struct iscsi_conn
*conn
= cmd
->conn
;
333 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
334 struct iscsi_data
*hdr
= (struct iscsi_data
*) buf
;
335 u32 payload_length
= ntoh24(hdr
->dlength
);
337 if (conn
->sess
->sess_ops
->InitialR2T
) {
338 pr_err("Received unexpected unsolicited data"
339 " while InitialR2T=Yes, protocol error.\n");
340 transport_send_check_condition_and_sense(se_cmd
,
341 TCM_UNEXPECTED_UNSOLICITED_DATA
, 0);
345 if ((cmd
->first_burst_len
+ payload_length
) >
346 conn
->sess
->sess_ops
->FirstBurstLength
) {
347 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
348 " for this Unsolicited DataOut Burst.\n",
349 (cmd
->first_burst_len
+ payload_length
),
350 conn
->sess
->sess_ops
->FirstBurstLength
);
351 transport_send_check_condition_and_sense(se_cmd
,
352 TCM_INCORRECT_AMOUNT_OF_DATA
, 0);
356 if (!(hdr
->flags
& ISCSI_FLAG_CMD_FINAL
))
359 if (((cmd
->first_burst_len
+ payload_length
) != cmd
->se_cmd
.data_length
) &&
360 ((cmd
->first_burst_len
+ payload_length
) !=
361 conn
->sess
->sess_ops
->FirstBurstLength
)) {
362 pr_err("Unsolicited non-immediate data received %u"
363 " does not equal FirstBurstLength: %u, and does"
364 " not equal ExpXferLen %u.\n",
365 (cmd
->first_burst_len
+ payload_length
),
366 conn
->sess
->sess_ops
->FirstBurstLength
, cmd
->se_cmd
.data_length
);
367 transport_send_check_condition_and_sense(se_cmd
,
368 TCM_INCORRECT_AMOUNT_OF_DATA
, 0);
374 struct iscsi_cmd
*iscsit_find_cmd_from_itt(
375 struct iscsi_conn
*conn
,
378 struct iscsi_cmd
*cmd
;
380 spin_lock_bh(&conn
->cmd_lock
);
381 list_for_each_entry(cmd
, &conn
->conn_cmd_list
, i_conn_node
) {
382 if (cmd
->init_task_tag
== init_task_tag
) {
383 spin_unlock_bh(&conn
->cmd_lock
);
387 spin_unlock_bh(&conn
->cmd_lock
);
389 pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
390 init_task_tag
, conn
->cid
);
393 EXPORT_SYMBOL(iscsit_find_cmd_from_itt
);
395 struct iscsi_cmd
*iscsit_find_cmd_from_itt_or_dump(
396 struct iscsi_conn
*conn
,
400 struct iscsi_cmd
*cmd
;
402 spin_lock_bh(&conn
->cmd_lock
);
403 list_for_each_entry(cmd
, &conn
->conn_cmd_list
, i_conn_node
) {
404 if (cmd
->cmd_flags
& ICF_GOT_LAST_DATAOUT
)
406 if (cmd
->init_task_tag
== init_task_tag
) {
407 spin_unlock_bh(&conn
->cmd_lock
);
411 spin_unlock_bh(&conn
->cmd_lock
);
413 pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
414 " dumping payload\n", init_task_tag
, conn
->cid
);
416 iscsit_dump_data_payload(conn
, length
, 1);
420 EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump
);
422 struct iscsi_cmd
*iscsit_find_cmd_from_ttt(
423 struct iscsi_conn
*conn
,
426 struct iscsi_cmd
*cmd
= NULL
;
428 spin_lock_bh(&conn
->cmd_lock
);
429 list_for_each_entry(cmd
, &conn
->conn_cmd_list
, i_conn_node
) {
430 if (cmd
->targ_xfer_tag
== targ_xfer_tag
) {
431 spin_unlock_bh(&conn
->cmd_lock
);
435 spin_unlock_bh(&conn
->cmd_lock
);
437 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
438 targ_xfer_tag
, conn
->cid
);
442 int iscsit_find_cmd_for_recovery(
443 struct iscsi_session
*sess
,
444 struct iscsi_cmd
**cmd_ptr
,
445 struct iscsi_conn_recovery
**cr_ptr
,
448 struct iscsi_cmd
*cmd
= NULL
;
449 struct iscsi_conn_recovery
*cr
;
451 * Scan through the inactive connection recovery list's command list.
452 * If init_task_tag matches the command is still alligent.
454 spin_lock(&sess
->cr_i_lock
);
455 list_for_each_entry(cr
, &sess
->cr_inactive_list
, cr_list
) {
456 spin_lock(&cr
->conn_recovery_cmd_lock
);
457 list_for_each_entry(cmd
, &cr
->conn_recovery_cmd_list
, i_conn_node
) {
458 if (cmd
->init_task_tag
== init_task_tag
) {
459 spin_unlock(&cr
->conn_recovery_cmd_lock
);
460 spin_unlock(&sess
->cr_i_lock
);
467 spin_unlock(&cr
->conn_recovery_cmd_lock
);
469 spin_unlock(&sess
->cr_i_lock
);
471 * Scan through the active connection recovery list's command list.
472 * If init_task_tag matches the command is ready to be reassigned.
474 spin_lock(&sess
->cr_a_lock
);
475 list_for_each_entry(cr
, &sess
->cr_active_list
, cr_list
) {
476 spin_lock(&cr
->conn_recovery_cmd_lock
);
477 list_for_each_entry(cmd
, &cr
->conn_recovery_cmd_list
, i_conn_node
) {
478 if (cmd
->init_task_tag
== init_task_tag
) {
479 spin_unlock(&cr
->conn_recovery_cmd_lock
);
480 spin_unlock(&sess
->cr_a_lock
);
487 spin_unlock(&cr
->conn_recovery_cmd_lock
);
489 spin_unlock(&sess
->cr_a_lock
);
494 void iscsit_add_cmd_to_immediate_queue(
495 struct iscsi_cmd
*cmd
,
496 struct iscsi_conn
*conn
,
499 struct iscsi_queue_req
*qr
;
501 qr
= kmem_cache_zalloc(lio_qr_cache
, GFP_ATOMIC
);
503 pr_err("Unable to allocate memory for"
504 " struct iscsi_queue_req\n");
507 INIT_LIST_HEAD(&qr
->qr_list
);
511 spin_lock_bh(&conn
->immed_queue_lock
);
512 list_add_tail(&qr
->qr_list
, &conn
->immed_queue_list
);
513 atomic_inc(&cmd
->immed_queue_count
);
514 atomic_set(&conn
->check_immediate_queue
, 1);
515 spin_unlock_bh(&conn
->immed_queue_lock
);
517 wake_up(&conn
->queues_wq
);
519 EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue
);
521 struct iscsi_queue_req
*iscsit_get_cmd_from_immediate_queue(struct iscsi_conn
*conn
)
523 struct iscsi_queue_req
*qr
;
525 spin_lock_bh(&conn
->immed_queue_lock
);
526 if (list_empty(&conn
->immed_queue_list
)) {
527 spin_unlock_bh(&conn
->immed_queue_lock
);
530 qr
= list_first_entry(&conn
->immed_queue_list
,
531 struct iscsi_queue_req
, qr_list
);
533 list_del(&qr
->qr_list
);
535 atomic_dec(&qr
->cmd
->immed_queue_count
);
536 spin_unlock_bh(&conn
->immed_queue_lock
);
541 static void iscsit_remove_cmd_from_immediate_queue(
542 struct iscsi_cmd
*cmd
,
543 struct iscsi_conn
*conn
)
545 struct iscsi_queue_req
*qr
, *qr_tmp
;
547 spin_lock_bh(&conn
->immed_queue_lock
);
548 if (!atomic_read(&cmd
->immed_queue_count
)) {
549 spin_unlock_bh(&conn
->immed_queue_lock
);
553 list_for_each_entry_safe(qr
, qr_tmp
, &conn
->immed_queue_list
, qr_list
) {
557 atomic_dec(&qr
->cmd
->immed_queue_count
);
558 list_del(&qr
->qr_list
);
559 kmem_cache_free(lio_qr_cache
, qr
);
561 spin_unlock_bh(&conn
->immed_queue_lock
);
563 if (atomic_read(&cmd
->immed_queue_count
)) {
564 pr_err("ITT: 0x%08x immed_queue_count: %d\n",
566 atomic_read(&cmd
->immed_queue_count
));
570 int iscsit_add_cmd_to_response_queue(
571 struct iscsi_cmd
*cmd
,
572 struct iscsi_conn
*conn
,
575 struct iscsi_queue_req
*qr
;
577 qr
= kmem_cache_zalloc(lio_qr_cache
, GFP_ATOMIC
);
579 pr_err("Unable to allocate memory for"
580 " struct iscsi_queue_req\n");
583 INIT_LIST_HEAD(&qr
->qr_list
);
587 spin_lock_bh(&conn
->response_queue_lock
);
588 list_add_tail(&qr
->qr_list
, &conn
->response_queue_list
);
589 atomic_inc(&cmd
->response_queue_count
);
590 spin_unlock_bh(&conn
->response_queue_lock
);
592 wake_up(&conn
->queues_wq
);
596 struct iscsi_queue_req
*iscsit_get_cmd_from_response_queue(struct iscsi_conn
*conn
)
598 struct iscsi_queue_req
*qr
;
600 spin_lock_bh(&conn
->response_queue_lock
);
601 if (list_empty(&conn
->response_queue_list
)) {
602 spin_unlock_bh(&conn
->response_queue_lock
);
606 qr
= list_first_entry(&conn
->response_queue_list
,
607 struct iscsi_queue_req
, qr_list
);
609 list_del(&qr
->qr_list
);
611 atomic_dec(&qr
->cmd
->response_queue_count
);
612 spin_unlock_bh(&conn
->response_queue_lock
);
617 static void iscsit_remove_cmd_from_response_queue(
618 struct iscsi_cmd
*cmd
,
619 struct iscsi_conn
*conn
)
621 struct iscsi_queue_req
*qr
, *qr_tmp
;
623 spin_lock_bh(&conn
->response_queue_lock
);
624 if (!atomic_read(&cmd
->response_queue_count
)) {
625 spin_unlock_bh(&conn
->response_queue_lock
);
629 list_for_each_entry_safe(qr
, qr_tmp
, &conn
->response_queue_list
,
634 atomic_dec(&qr
->cmd
->response_queue_count
);
635 list_del(&qr
->qr_list
);
636 kmem_cache_free(lio_qr_cache
, qr
);
638 spin_unlock_bh(&conn
->response_queue_lock
);
640 if (atomic_read(&cmd
->response_queue_count
)) {
641 pr_err("ITT: 0x%08x response_queue_count: %d\n",
643 atomic_read(&cmd
->response_queue_count
));
647 bool iscsit_conn_all_queues_empty(struct iscsi_conn
*conn
)
651 spin_lock_bh(&conn
->immed_queue_lock
);
652 empty
= list_empty(&conn
->immed_queue_list
);
653 spin_unlock_bh(&conn
->immed_queue_lock
);
658 spin_lock_bh(&conn
->response_queue_lock
);
659 empty
= list_empty(&conn
->response_queue_list
);
660 spin_unlock_bh(&conn
->response_queue_lock
);
665 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn
*conn
)
667 struct iscsi_queue_req
*qr
, *qr_tmp
;
669 spin_lock_bh(&conn
->immed_queue_lock
);
670 list_for_each_entry_safe(qr
, qr_tmp
, &conn
->immed_queue_list
, qr_list
) {
671 list_del(&qr
->qr_list
);
673 atomic_dec(&qr
->cmd
->immed_queue_count
);
675 kmem_cache_free(lio_qr_cache
, qr
);
677 spin_unlock_bh(&conn
->immed_queue_lock
);
679 spin_lock_bh(&conn
->response_queue_lock
);
680 list_for_each_entry_safe(qr
, qr_tmp
, &conn
->response_queue_list
,
682 list_del(&qr
->qr_list
);
684 atomic_dec(&qr
->cmd
->response_queue_count
);
686 kmem_cache_free(lio_qr_cache
, qr
);
688 spin_unlock_bh(&conn
->response_queue_lock
);
691 void iscsit_release_cmd(struct iscsi_cmd
*cmd
)
693 struct iscsi_session
*sess
;
694 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
697 sess
= cmd
->conn
->sess
;
701 BUG_ON(!sess
|| !sess
->se_sess
);
704 kfree(cmd
->pdu_list
);
705 kfree(cmd
->seq_list
);
707 kfree(cmd
->iov_data
);
708 kfree(cmd
->text_in_ptr
);
710 percpu_ida_free(&sess
->se_sess
->sess_tag_pool
, se_cmd
->map_tag
);
712 EXPORT_SYMBOL(iscsit_release_cmd
);
714 void __iscsit_free_cmd(struct iscsi_cmd
*cmd
, bool scsi_cmd
,
717 struct iscsi_conn
*conn
= cmd
->conn
;
720 if (cmd
->data_direction
== DMA_TO_DEVICE
) {
721 iscsit_stop_dataout_timer(cmd
);
722 iscsit_free_r2ts_from_list(cmd
);
724 if (cmd
->data_direction
== DMA_FROM_DEVICE
)
725 iscsit_free_all_datain_reqs(cmd
);
728 if (conn
&& check_queues
) {
729 iscsit_remove_cmd_from_immediate_queue(cmd
, conn
);
730 iscsit_remove_cmd_from_response_queue(cmd
, conn
);
733 if (conn
&& conn
->conn_transport
->iscsit_release_cmd
)
734 conn
->conn_transport
->iscsit_release_cmd(conn
, cmd
);
737 void iscsit_free_cmd(struct iscsi_cmd
*cmd
, bool shutdown
)
739 struct se_cmd
*se_cmd
= NULL
;
741 bool op_scsi
= false;
743 * Determine if a struct se_cmd is associated with
744 * this struct iscsi_cmd.
746 switch (cmd
->iscsi_opcode
) {
747 case ISCSI_OP_SCSI_CMD
:
752 case ISCSI_OP_SCSI_TMFUNC
:
753 se_cmd
= &cmd
->se_cmd
;
754 __iscsit_free_cmd(cmd
, op_scsi
, shutdown
);
755 rc
= transport_generic_free_cmd(se_cmd
, shutdown
);
756 if (!rc
&& shutdown
&& se_cmd
->se_sess
) {
757 __iscsit_free_cmd(cmd
, op_scsi
, shutdown
);
758 target_put_sess_cmd(se_cmd
);
761 case ISCSI_OP_REJECT
:
763 * Handle special case for REJECT when iscsi_add_reject*() has
764 * overwritten the original iscsi_opcode assignment, and the
765 * associated cmd->se_cmd needs to be released.
767 if (cmd
->se_cmd
.se_tfo
!= NULL
) {
768 se_cmd
= &cmd
->se_cmd
;
769 __iscsit_free_cmd(cmd
, true, shutdown
);
771 rc
= transport_generic_free_cmd(&cmd
->se_cmd
, shutdown
);
772 if (!rc
&& shutdown
&& se_cmd
->se_sess
) {
773 __iscsit_free_cmd(cmd
, true, shutdown
);
774 target_put_sess_cmd(se_cmd
);
780 __iscsit_free_cmd(cmd
, false, shutdown
);
781 iscsit_release_cmd(cmd
);
785 EXPORT_SYMBOL(iscsit_free_cmd
);
787 int iscsit_check_session_usage_count(struct iscsi_session
*sess
)
789 spin_lock_bh(&sess
->session_usage_lock
);
790 if (sess
->session_usage_count
!= 0) {
791 sess
->session_waiting_on_uc
= 1;
792 spin_unlock_bh(&sess
->session_usage_lock
);
796 wait_for_completion(&sess
->session_waiting_on_uc_comp
);
799 spin_unlock_bh(&sess
->session_usage_lock
);
804 void iscsit_dec_session_usage_count(struct iscsi_session
*sess
)
806 spin_lock_bh(&sess
->session_usage_lock
);
807 sess
->session_usage_count
--;
809 if (!sess
->session_usage_count
&& sess
->session_waiting_on_uc
)
810 complete(&sess
->session_waiting_on_uc_comp
);
812 spin_unlock_bh(&sess
->session_usage_lock
);
815 void iscsit_inc_session_usage_count(struct iscsi_session
*sess
)
817 spin_lock_bh(&sess
->session_usage_lock
);
818 sess
->session_usage_count
++;
819 spin_unlock_bh(&sess
->session_usage_lock
);
822 struct iscsi_conn
*iscsit_get_conn_from_cid(struct iscsi_session
*sess
, u16 cid
)
824 struct iscsi_conn
*conn
;
826 spin_lock_bh(&sess
->conn_lock
);
827 list_for_each_entry(conn
, &sess
->sess_conn_list
, conn_list
) {
828 if ((conn
->cid
== cid
) &&
829 (conn
->conn_state
== TARG_CONN_STATE_LOGGED_IN
)) {
830 iscsit_inc_conn_usage_count(conn
);
831 spin_unlock_bh(&sess
->conn_lock
);
835 spin_unlock_bh(&sess
->conn_lock
);
840 struct iscsi_conn
*iscsit_get_conn_from_cid_rcfr(struct iscsi_session
*sess
, u16 cid
)
842 struct iscsi_conn
*conn
;
844 spin_lock_bh(&sess
->conn_lock
);
845 list_for_each_entry(conn
, &sess
->sess_conn_list
, conn_list
) {
846 if (conn
->cid
== cid
) {
847 iscsit_inc_conn_usage_count(conn
);
848 spin_lock(&conn
->state_lock
);
849 atomic_set(&conn
->connection_wait_rcfr
, 1);
850 spin_unlock(&conn
->state_lock
);
851 spin_unlock_bh(&sess
->conn_lock
);
855 spin_unlock_bh(&sess
->conn_lock
);
860 void iscsit_check_conn_usage_count(struct iscsi_conn
*conn
)
862 spin_lock_bh(&conn
->conn_usage_lock
);
863 if (conn
->conn_usage_count
!= 0) {
864 conn
->conn_waiting_on_uc
= 1;
865 spin_unlock_bh(&conn
->conn_usage_lock
);
867 wait_for_completion(&conn
->conn_waiting_on_uc_comp
);
870 spin_unlock_bh(&conn
->conn_usage_lock
);
873 void iscsit_dec_conn_usage_count(struct iscsi_conn
*conn
)
875 spin_lock_bh(&conn
->conn_usage_lock
);
876 conn
->conn_usage_count
--;
878 if (!conn
->conn_usage_count
&& conn
->conn_waiting_on_uc
)
879 complete(&conn
->conn_waiting_on_uc_comp
);
881 spin_unlock_bh(&conn
->conn_usage_lock
);
884 void iscsit_inc_conn_usage_count(struct iscsi_conn
*conn
)
886 spin_lock_bh(&conn
->conn_usage_lock
);
887 conn
->conn_usage_count
++;
888 spin_unlock_bh(&conn
->conn_usage_lock
);
891 static int iscsit_add_nopin(struct iscsi_conn
*conn
, int want_response
)
894 struct iscsi_cmd
*cmd
;
896 cmd
= iscsit_allocate_cmd(conn
, TASK_RUNNING
);
900 cmd
->iscsi_opcode
= ISCSI_OP_NOOP_IN
;
901 state
= (want_response
) ? ISTATE_SEND_NOPIN_WANT_RESPONSE
:
902 ISTATE_SEND_NOPIN_NO_RESPONSE
;
903 cmd
->init_task_tag
= RESERVED_ITT
;
904 cmd
->targ_xfer_tag
= (want_response
) ?
905 session_get_next_ttt(conn
->sess
) : 0xFFFFFFFF;
906 spin_lock_bh(&conn
->cmd_lock
);
907 list_add_tail(&cmd
->i_conn_node
, &conn
->conn_cmd_list
);
908 spin_unlock_bh(&conn
->cmd_lock
);
911 iscsit_start_nopin_response_timer(conn
);
912 iscsit_add_cmd_to_immediate_queue(cmd
, conn
, state
);
917 static void iscsit_handle_nopin_response_timeout(unsigned long data
)
919 struct iscsi_conn
*conn
= (struct iscsi_conn
*) data
;
921 iscsit_inc_conn_usage_count(conn
);
923 spin_lock_bh(&conn
->nopin_timer_lock
);
924 if (conn
->nopin_response_timer_flags
& ISCSI_TF_STOP
) {
925 spin_unlock_bh(&conn
->nopin_timer_lock
);
926 iscsit_dec_conn_usage_count(conn
);
930 pr_debug("Did not receive response to NOPIN on CID: %hu on"
931 " SID: %u, failing connection.\n", conn
->cid
,
933 conn
->nopin_response_timer_flags
&= ~ISCSI_TF_RUNNING
;
934 spin_unlock_bh(&conn
->nopin_timer_lock
);
937 struct iscsi_portal_group
*tpg
= conn
->sess
->tpg
;
938 struct iscsi_tiqn
*tiqn
= tpg
->tpg_tiqn
;
941 spin_lock_bh(&tiqn
->sess_err_stats
.lock
);
942 strcpy(tiqn
->sess_err_stats
.last_sess_fail_rem_name
,
943 conn
->sess
->sess_ops
->InitiatorName
);
944 tiqn
->sess_err_stats
.last_sess_failure_type
=
945 ISCSI_SESS_ERR_CXN_TIMEOUT
;
946 tiqn
->sess_err_stats
.cxn_timeout_errors
++;
947 atomic_long_inc(&conn
->sess
->conn_timeout_errors
);
948 spin_unlock_bh(&tiqn
->sess_err_stats
.lock
);
952 iscsit_cause_connection_reinstatement(conn
, 0);
953 iscsit_dec_conn_usage_count(conn
);
956 void iscsit_mod_nopin_response_timer(struct iscsi_conn
*conn
)
958 struct iscsi_session
*sess
= conn
->sess
;
959 struct iscsi_node_attrib
*na
= iscsit_tpg_get_node_attrib(sess
);
961 spin_lock_bh(&conn
->nopin_timer_lock
);
962 if (!(conn
->nopin_response_timer_flags
& ISCSI_TF_RUNNING
)) {
963 spin_unlock_bh(&conn
->nopin_timer_lock
);
967 mod_timer(&conn
->nopin_response_timer
,
968 (get_jiffies_64() + na
->nopin_response_timeout
* HZ
));
969 spin_unlock_bh(&conn
->nopin_timer_lock
);
973 * Called with conn->nopin_timer_lock held.
975 void iscsit_start_nopin_response_timer(struct iscsi_conn
*conn
)
977 struct iscsi_session
*sess
= conn
->sess
;
978 struct iscsi_node_attrib
*na
= iscsit_tpg_get_node_attrib(sess
);
980 spin_lock_bh(&conn
->nopin_timer_lock
);
981 if (conn
->nopin_response_timer_flags
& ISCSI_TF_RUNNING
) {
982 spin_unlock_bh(&conn
->nopin_timer_lock
);
986 init_timer(&conn
->nopin_response_timer
);
987 conn
->nopin_response_timer
.expires
=
988 (get_jiffies_64() + na
->nopin_response_timeout
* HZ
);
989 conn
->nopin_response_timer
.data
= (unsigned long)conn
;
990 conn
->nopin_response_timer
.function
= iscsit_handle_nopin_response_timeout
;
991 conn
->nopin_response_timer_flags
&= ~ISCSI_TF_STOP
;
992 conn
->nopin_response_timer_flags
|= ISCSI_TF_RUNNING
;
993 add_timer(&conn
->nopin_response_timer
);
995 pr_debug("Started NOPIN Response Timer on CID: %d to %u"
996 " seconds\n", conn
->cid
, na
->nopin_response_timeout
);
997 spin_unlock_bh(&conn
->nopin_timer_lock
);
1000 void iscsit_stop_nopin_response_timer(struct iscsi_conn
*conn
)
1002 spin_lock_bh(&conn
->nopin_timer_lock
);
1003 if (!(conn
->nopin_response_timer_flags
& ISCSI_TF_RUNNING
)) {
1004 spin_unlock_bh(&conn
->nopin_timer_lock
);
1007 conn
->nopin_response_timer_flags
|= ISCSI_TF_STOP
;
1008 spin_unlock_bh(&conn
->nopin_timer_lock
);
1010 del_timer_sync(&conn
->nopin_response_timer
);
1012 spin_lock_bh(&conn
->nopin_timer_lock
);
1013 conn
->nopin_response_timer_flags
&= ~ISCSI_TF_RUNNING
;
1014 spin_unlock_bh(&conn
->nopin_timer_lock
);
1017 static void iscsit_handle_nopin_timeout(unsigned long data
)
1019 struct iscsi_conn
*conn
= (struct iscsi_conn
*) data
;
1021 iscsit_inc_conn_usage_count(conn
);
1023 spin_lock_bh(&conn
->nopin_timer_lock
);
1024 if (conn
->nopin_timer_flags
& ISCSI_TF_STOP
) {
1025 spin_unlock_bh(&conn
->nopin_timer_lock
);
1026 iscsit_dec_conn_usage_count(conn
);
1029 conn
->nopin_timer_flags
&= ~ISCSI_TF_RUNNING
;
1030 spin_unlock_bh(&conn
->nopin_timer_lock
);
1032 iscsit_add_nopin(conn
, 1);
1033 iscsit_dec_conn_usage_count(conn
);
1037 * Called with conn->nopin_timer_lock held.
1039 void __iscsit_start_nopin_timer(struct iscsi_conn
*conn
)
1041 struct iscsi_session
*sess
= conn
->sess
;
1042 struct iscsi_node_attrib
*na
= iscsit_tpg_get_node_attrib(sess
);
1044 * NOPIN timeout is disabled.
1046 if (!na
->nopin_timeout
)
1049 if (conn
->nopin_timer_flags
& ISCSI_TF_RUNNING
)
1052 init_timer(&conn
->nopin_timer
);
1053 conn
->nopin_timer
.expires
= (get_jiffies_64() + na
->nopin_timeout
* HZ
);
1054 conn
->nopin_timer
.data
= (unsigned long)conn
;
1055 conn
->nopin_timer
.function
= iscsit_handle_nopin_timeout
;
1056 conn
->nopin_timer_flags
&= ~ISCSI_TF_STOP
;
1057 conn
->nopin_timer_flags
|= ISCSI_TF_RUNNING
;
1058 add_timer(&conn
->nopin_timer
);
1060 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1061 " interval\n", conn
->cid
, na
->nopin_timeout
);
1064 void iscsit_start_nopin_timer(struct iscsi_conn
*conn
)
1066 struct iscsi_session
*sess
= conn
->sess
;
1067 struct iscsi_node_attrib
*na
= iscsit_tpg_get_node_attrib(sess
);
1069 * NOPIN timeout is disabled..
1071 if (!na
->nopin_timeout
)
1074 spin_lock_bh(&conn
->nopin_timer_lock
);
1075 if (conn
->nopin_timer_flags
& ISCSI_TF_RUNNING
) {
1076 spin_unlock_bh(&conn
->nopin_timer_lock
);
1080 init_timer(&conn
->nopin_timer
);
1081 conn
->nopin_timer
.expires
= (get_jiffies_64() + na
->nopin_timeout
* HZ
);
1082 conn
->nopin_timer
.data
= (unsigned long)conn
;
1083 conn
->nopin_timer
.function
= iscsit_handle_nopin_timeout
;
1084 conn
->nopin_timer_flags
&= ~ISCSI_TF_STOP
;
1085 conn
->nopin_timer_flags
|= ISCSI_TF_RUNNING
;
1086 add_timer(&conn
->nopin_timer
);
1088 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1089 " interval\n", conn
->cid
, na
->nopin_timeout
);
1090 spin_unlock_bh(&conn
->nopin_timer_lock
);
1093 void iscsit_stop_nopin_timer(struct iscsi_conn
*conn
)
1095 spin_lock_bh(&conn
->nopin_timer_lock
);
1096 if (!(conn
->nopin_timer_flags
& ISCSI_TF_RUNNING
)) {
1097 spin_unlock_bh(&conn
->nopin_timer_lock
);
1100 conn
->nopin_timer_flags
|= ISCSI_TF_STOP
;
1101 spin_unlock_bh(&conn
->nopin_timer_lock
);
1103 del_timer_sync(&conn
->nopin_timer
);
1105 spin_lock_bh(&conn
->nopin_timer_lock
);
1106 conn
->nopin_timer_flags
&= ~ISCSI_TF_RUNNING
;
1107 spin_unlock_bh(&conn
->nopin_timer_lock
);
1110 int iscsit_send_tx_data(
1111 struct iscsi_cmd
*cmd
,
1112 struct iscsi_conn
*conn
,
1115 int tx_sent
, tx_size
;
1120 tx_size
= cmd
->tx_size
;
1123 iov
= &cmd
->iov_data
[0];
1124 iov_count
= cmd
->iov_data_count
;
1126 iov
= &cmd
->iov_misc
[0];
1127 iov_count
= cmd
->iov_misc_count
;
1130 tx_sent
= tx_data(conn
, &iov
[0], iov_count
, tx_size
);
1131 if (tx_size
!= tx_sent
) {
1132 if (tx_sent
== -EAGAIN
) {
1133 pr_err("tx_data() returned -EAGAIN\n");
1143 int iscsit_fe_sendpage_sg(
1144 struct iscsi_cmd
*cmd
,
1145 struct iscsi_conn
*conn
)
1147 struct scatterlist
*sg
= cmd
->first_data_sg
;
1149 u32 tx_hdr_size
, data_len
;
1150 u32 offset
= cmd
->first_data_sg_off
;
1151 int tx_sent
, iov_off
;
1154 tx_hdr_size
= ISCSI_HDR_LEN
;
1155 if (conn
->conn_ops
->HeaderDigest
)
1156 tx_hdr_size
+= ISCSI_CRC_LEN
;
1158 iov
.iov_base
= cmd
->pdu
;
1159 iov
.iov_len
= tx_hdr_size
;
1161 tx_sent
= tx_data(conn
, &iov
, 1, tx_hdr_size
);
1162 if (tx_hdr_size
!= tx_sent
) {
1163 if (tx_sent
== -EAGAIN
) {
1164 pr_err("tx_data() returned -EAGAIN\n");
1170 data_len
= cmd
->tx_size
- tx_hdr_size
- cmd
->padding
;
1172 * Set iov_off used by padding and data digest tx_data() calls below
1173 * in order to determine proper offset into cmd->iov_data[]
1175 if (conn
->conn_ops
->DataDigest
) {
1176 data_len
-= ISCSI_CRC_LEN
;
1178 iov_off
= (cmd
->iov_data_count
- 2);
1180 iov_off
= (cmd
->iov_data_count
- 1);
1182 iov_off
= (cmd
->iov_data_count
- 1);
1185 * Perform sendpage() for each page in the scatterlist
1188 u32 space
= (sg
->length
- offset
);
1189 u32 sub_len
= min_t(u32
, data_len
, space
);
1191 tx_sent
= conn
->sock
->ops
->sendpage(conn
->sock
,
1192 sg_page(sg
), sg
->offset
+ offset
, sub_len
, 0);
1193 if (tx_sent
!= sub_len
) {
1194 if (tx_sent
== -EAGAIN
) {
1195 pr_err("tcp_sendpage() returned"
1200 pr_err("tcp_sendpage() failure: %d\n",
1205 data_len
-= sub_len
;
1212 struct kvec
*iov_p
= &cmd
->iov_data
[iov_off
++];
1214 tx_sent
= tx_data(conn
, iov_p
, 1, cmd
->padding
);
1215 if (cmd
->padding
!= tx_sent
) {
1216 if (tx_sent
== -EAGAIN
) {
1217 pr_err("tx_data() returned -EAGAIN\n");
1225 if (conn
->conn_ops
->DataDigest
) {
1226 struct kvec
*iov_d
= &cmd
->iov_data
[iov_off
];
1228 tx_sent
= tx_data(conn
, iov_d
, 1, ISCSI_CRC_LEN
);
1229 if (ISCSI_CRC_LEN
!= tx_sent
) {
1230 if (tx_sent
== -EAGAIN
) {
1231 pr_err("tx_data() returned -EAGAIN\n");
1242 * This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU
1243 * back to the Initiator when an expection condition occurs with the
1244 * errors set in status_class and status_detail.
1246 * Parameters: iSCSI Connection, Status Class, Status Detail.
1247 * Returns: 0 on success, -1 on error.
1249 int iscsit_tx_login_rsp(struct iscsi_conn
*conn
, u8 status_class
, u8 status_detail
)
1251 struct iscsi_login_rsp
*hdr
;
1252 struct iscsi_login
*login
= conn
->conn_login
;
1254 login
->login_failed
= 1;
1255 iscsit_collect_login_stats(conn
, status_class
, status_detail
);
1257 memset(&login
->rsp
[0], 0, ISCSI_HDR_LEN
);
1259 hdr
= (struct iscsi_login_rsp
*)&login
->rsp
[0];
1260 hdr
->opcode
= ISCSI_OP_LOGIN_RSP
;
1261 hdr
->status_class
= status_class
;
1262 hdr
->status_detail
= status_detail
;
1263 hdr
->itt
= conn
->login_itt
;
1265 return conn
->conn_transport
->iscsit_put_login_tx(conn
, login
, 0);
1268 void iscsit_print_session_params(struct iscsi_session
*sess
)
1270 struct iscsi_conn
*conn
;
1272 pr_debug("-----------------------------[Session Params for"
1273 " SID: %u]-----------------------------\n", sess
->sid
);
1274 spin_lock_bh(&sess
->conn_lock
);
1275 list_for_each_entry(conn
, &sess
->sess_conn_list
, conn_list
)
1276 iscsi_dump_conn_ops(conn
->conn_ops
);
1277 spin_unlock_bh(&sess
->conn_lock
);
1279 iscsi_dump_sess_ops(sess
->sess_ops
);
1282 static int iscsit_do_rx_data(
1283 struct iscsi_conn
*conn
,
1284 struct iscsi_data_count
*count
)
1286 int data
= count
->data_length
, rx_loop
= 0, total_rx
= 0;
1289 if (!conn
|| !conn
->sock
|| !conn
->conn_ops
)
1292 memset(&msg
, 0, sizeof(struct msghdr
));
1293 iov_iter_kvec(&msg
.msg_iter
, READ
| ITER_KVEC
,
1294 count
->iov
, count
->iov_count
, data
);
1296 while (msg_data_left(&msg
)) {
1297 rx_loop
= sock_recvmsg(conn
->sock
, &msg
, MSG_WAITALL
);
1299 pr_debug("rx_loop: %d total_rx: %d\n",
1303 total_rx
+= rx_loop
;
1304 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1305 rx_loop
, total_rx
, data
);
1312 struct iscsi_conn
*conn
,
1317 struct iscsi_data_count c
;
1319 if (!conn
|| !conn
->sock
|| !conn
->conn_ops
)
1322 memset(&c
, 0, sizeof(struct iscsi_data_count
));
1324 c
.iov_count
= iov_count
;
1325 c
.data_length
= data
;
1326 c
.type
= ISCSI_RX_DATA
;
1328 return iscsit_do_rx_data(conn
, &c
);
1332 struct iscsi_conn
*conn
,
1340 if (!conn
|| !conn
->sock
|| !conn
->conn_ops
)
1344 pr_err("Data length is: %d\n", data
);
1348 memset(&msg
, 0, sizeof(struct msghdr
));
1350 iov_iter_kvec(&msg
.msg_iter
, WRITE
| ITER_KVEC
,
1351 iov
, iov_count
, data
);
1353 while (msg_data_left(&msg
)) {
1354 int tx_loop
= sock_sendmsg(conn
->sock
, &msg
);
1356 pr_debug("tx_loop: %d total_tx %d\n",
1360 total_tx
+= tx_loop
;
1361 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1362 tx_loop
, total_tx
, data
);
1368 void iscsit_collect_login_stats(
1369 struct iscsi_conn
*conn
,
1373 struct iscsi_param
*intrname
= NULL
;
1374 struct iscsi_tiqn
*tiqn
;
1375 struct iscsi_login_stats
*ls
;
1377 tiqn
= iscsit_snmp_get_tiqn(conn
);
1381 ls
= &tiqn
->login_stats
;
1383 spin_lock(&ls
->lock
);
1384 if (status_class
== ISCSI_STATUS_CLS_SUCCESS
)
1386 else if (status_class
== ISCSI_STATUS_CLS_REDIRECT
) {
1388 ls
->last_fail_type
= ISCSI_LOGIN_FAIL_REDIRECT
;
1389 } else if ((status_class
== ISCSI_STATUS_CLS_INITIATOR_ERR
) &&
1390 (status_detail
== ISCSI_LOGIN_STATUS_AUTH_FAILED
)) {
1391 ls
->authenticate_fails
++;
1392 ls
->last_fail_type
= ISCSI_LOGIN_FAIL_AUTHENTICATE
;
1393 } else if ((status_class
== ISCSI_STATUS_CLS_INITIATOR_ERR
) &&
1394 (status_detail
== ISCSI_LOGIN_STATUS_TGT_FORBIDDEN
)) {
1395 ls
->authorize_fails
++;
1396 ls
->last_fail_type
= ISCSI_LOGIN_FAIL_AUTHORIZE
;
1397 } else if ((status_class
== ISCSI_STATUS_CLS_INITIATOR_ERR
) &&
1398 (status_detail
== ISCSI_LOGIN_STATUS_INIT_ERR
)) {
1399 ls
->negotiate_fails
++;
1400 ls
->last_fail_type
= ISCSI_LOGIN_FAIL_NEGOTIATE
;
1403 ls
->last_fail_type
= ISCSI_LOGIN_FAIL_OTHER
;
1406 /* Save initiator name, ip address and time, if it is a failed login */
1407 if (status_class
!= ISCSI_STATUS_CLS_SUCCESS
) {
1408 if (conn
->param_list
)
1409 intrname
= iscsi_find_param_from_key(INITIATORNAME
,
1411 strlcpy(ls
->last_intr_fail_name
,
1412 (intrname
? intrname
->value
: "Unknown"),
1413 sizeof(ls
->last_intr_fail_name
));
1415 ls
->last_intr_fail_ip_family
= conn
->login_family
;
1417 ls
->last_intr_fail_sockaddr
= conn
->login_sockaddr
;
1418 ls
->last_fail_time
= get_jiffies_64();
1421 spin_unlock(&ls
->lock
);
1424 struct iscsi_tiqn
*iscsit_snmp_get_tiqn(struct iscsi_conn
*conn
)
1426 struct iscsi_portal_group
*tpg
;
1438 return tpg
->tpg_tiqn
;