drivers/target/iscsi/iscsi_target.c
1 /*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver.
3 *
4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ******************************************************************************/
18
19 #include <linux/string.h>
20 #include <linux/kthread.h>
21 #include <linux/crypto.h>
22 #include <linux/completion.h>
23 #include <linux/module.h>
24 #include <linux/idr.h>
25 #include <asm/unaligned.h>
26 #include <scsi/scsi_device.h>
27 #include <scsi/iscsi_proto.h>
28 #include <scsi/scsi_tcq.h>
29 #include <target/target_core_base.h>
30 #include <target/target_core_fabric.h>
31 #include <target/target_core_configfs.h>
32
33 #include "iscsi_target_core.h"
34 #include "iscsi_target_parameters.h"
35 #include "iscsi_target_seq_pdu_list.h"
36 #include "iscsi_target_tq.h"
37 #include "iscsi_target_configfs.h"
38 #include "iscsi_target_datain_values.h"
39 #include "iscsi_target_erl0.h"
40 #include "iscsi_target_erl1.h"
41 #include "iscsi_target_erl2.h"
42 #include "iscsi_target_login.h"
43 #include "iscsi_target_tmr.h"
44 #include "iscsi_target_tpg.h"
45 #include "iscsi_target_util.h"
46 #include "iscsi_target.h"
47 #include "iscsi_target_device.h"
48 #include "iscsi_target_stat.h"
49
50 #include <target/iscsi/iscsi_transport.h>
51
52 static LIST_HEAD(g_tiqn_list);
53 static LIST_HEAD(g_np_list);
54 static DEFINE_SPINLOCK(tiqn_lock);
55 static DEFINE_SPINLOCK(np_lock);
56
57 static struct idr tiqn_idr;
58 struct idr sess_idr;
59 struct mutex auth_id_lock;
60 spinlock_t sess_idr_lock;
61
62 struct iscsit_global *iscsit_global;
63
64 struct kmem_cache *lio_qr_cache;
65 struct kmem_cache *lio_dr_cache;
66 struct kmem_cache *lio_ooo_cache;
67 struct kmem_cache *lio_r2t_cache;
68
69 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
70 struct iscsi_scsi_req *, u32);
71
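/*
 * Locate an active struct iscsi_tiqn matching the IQN in @buf and take an
 * access reference on it.  Returns NULL if no matching active TIQN exists.
 */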
72 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
73 {
74 struct iscsi_tiqn *tiqn = NULL;
75
76 spin_lock(&tiqn_lock);
77 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
78 if (!strcmp(tiqn->tiqn, buf)) {
79
80 spin_lock(&tiqn->tiqn_state_lock);
81 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
82 tiqn->tiqn_access_count++;
83 spin_unlock(&tiqn->tiqn_state_lock);
84 spin_unlock(&tiqn_lock);
85 return tiqn;
86 }
87 spin_unlock(&tiqn->tiqn_state_lock);
88 }
89 }
90 spin_unlock(&tiqn_lock);
91
92 return NULL;
93 }
94
95 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
96 {
97 spin_lock(&tiqn->tiqn_state_lock);
98 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
99 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
100 spin_unlock(&tiqn->tiqn_state_lock);
101 return 0;
102 }
103 spin_unlock(&tiqn->tiqn_state_lock);
104
105 return -1;
106 }
107
108 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
109 {
110 spin_lock(&tiqn->tiqn_state_lock);
111 tiqn->tiqn_access_count--;
112 spin_unlock(&tiqn->tiqn_state_lock);
113 }
114
115 /*
116 * Note that IQN formatting is expected to be done in userspace, and
117  * no explicit IQN format checks are done here.
118 */
119 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
120 {
121 struct iscsi_tiqn *tiqn = NULL;
122 int ret;
123
124 if (strlen(buf) >= ISCSI_IQN_LEN) {
125 pr_err("Target IQN exceeds %d bytes\n",
126 ISCSI_IQN_LEN);
127 return ERR_PTR(-EINVAL);
128 }
129
130 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
131 if (!tiqn) {
132 pr_err("Unable to allocate struct iscsi_tiqn\n");
133 return ERR_PTR(-ENOMEM);
134 }
135
136 sprintf(tiqn->tiqn, "%s", buf);
137 INIT_LIST_HEAD(&tiqn->tiqn_list);
138 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
139 spin_lock_init(&tiqn->tiqn_state_lock);
140 spin_lock_init(&tiqn->tiqn_tpg_lock);
141 spin_lock_init(&tiqn->sess_err_stats.lock);
142 spin_lock_init(&tiqn->login_stats.lock);
143 spin_lock_init(&tiqn->logout_stats.lock);
144
145 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
146
147 idr_preload(GFP_KERNEL);
148 spin_lock(&tiqn_lock);
149
150 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
151 if (ret < 0) {
152 pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
153 spin_unlock(&tiqn_lock);
154 idr_preload_end();
155 kfree(tiqn);
156 return ERR_PTR(ret);
157 }
158 tiqn->tiqn_index = ret;
159 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
160
161 spin_unlock(&tiqn_lock);
162 idr_preload_end();
163
164 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
165
166 return tiqn;
167
168 }
169
170 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
171 {
172 /*
173 * Wait for accesses to said struct iscsi_tiqn to end.
174 */
175 spin_lock(&tiqn->tiqn_state_lock);
176 while (tiqn->tiqn_access_count != 0) {
177 spin_unlock(&tiqn->tiqn_state_lock);
178 msleep(10);
179 spin_lock(&tiqn->tiqn_state_lock);
180 }
181 spin_unlock(&tiqn->tiqn_state_lock);
182 }
183
184 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
185 {
186 /*
187 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
188 * while holding tiqn->tiqn_state_lock. This means that all subsequent
189 * attempts to access this struct iscsi_tiqn will fail from both transport
190 * fabric and control code paths.
191 */
192 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
193 pr_err("iscsit_set_tiqn_shutdown() failed\n");
194 return;
195 }
196
197 iscsit_wait_for_tiqn(tiqn);
198
199 spin_lock(&tiqn_lock);
200 list_del(&tiqn->tiqn_list);
201 idr_remove(&tiqn_idr, tiqn->tiqn_index);
202 spin_unlock(&tiqn_lock);
203
204 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
205 tiqn->tiqn);
206 kfree(tiqn);
207 }
208
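/*
 * Verify that both the network portal thread and the portal group are
 * active, then take tpg->np_login_sem to serialize login processing
 * across the TIQN+TPG tuple.  Returns -1 if either is inactive or the
 * semaphore wait is interrupted.
 */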
209 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
210 {
211 int ret;
212 /*
213 * Determine if the network portal is accepting storage traffic.
214 */
215 spin_lock_bh(&np->np_thread_lock);
216 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
217 spin_unlock_bh(&np->np_thread_lock);
218 return -1;
219 }
220 spin_unlock_bh(&np->np_thread_lock);
221 /*
222 * Determine if the portal group is accepting storage traffic.
223 */
224 spin_lock_bh(&tpg->tpg_state_lock);
225 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
226 spin_unlock_bh(&tpg->tpg_state_lock);
227 return -1;
228 }
229 spin_unlock_bh(&tpg->tpg_state_lock);
230
231 /*
232 * Here we serialize access across the TIQN+TPG Tuple.
233 */
234 ret = down_interruptible(&tpg->np_login_sem);
235 if ((ret != 0) || signal_pending(current))
236 return -1;
237
238 spin_lock_bh(&tpg->tpg_state_lock);
239 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
240 spin_unlock_bh(&tpg->tpg_state_lock);
241 up(&tpg->np_login_sem);
242 return -1;
243 }
244 spin_unlock_bh(&tpg->tpg_state_lock);
245
246 return 0;
247 }
248
249 void iscsit_login_kref_put(struct kref *kref)
250 {
251 struct iscsi_tpg_np *tpg_np = container_of(kref,
252 struct iscsi_tpg_np, tpg_np_kref);
253
254 complete(&tpg_np->tpg_np_comp);
255 }
256
257 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
258 struct iscsi_tpg_np *tpg_np)
259 {
260 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
261
262 up(&tpg->np_login_sem);
263
264 if (tpg_np)
265 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
266
267 if (tiqn)
268 iscsit_put_tiqn_for_login(tiqn);
269
270 return 0;
271 }
272
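/*
 * Return true when @sockaddr matches the address, port and network
 * transport of the existing struct iscsi_np.
 */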
273 bool iscsit_check_np_match(
274 struct __kernel_sockaddr_storage *sockaddr,
275 struct iscsi_np *np,
276 int network_transport)
277 {
278 struct sockaddr_in *sock_in, *sock_in_e;
279 struct sockaddr_in6 *sock_in6, *sock_in6_e;
280 bool ip_match = false;
281 u16 port;
282
283 if (sockaddr->ss_family == AF_INET6) {
284 sock_in6 = (struct sockaddr_in6 *)sockaddr;
285 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
286
287 if (!memcmp(&sock_in6->sin6_addr.in6_u,
288 &sock_in6_e->sin6_addr.in6_u,
289 sizeof(struct in6_addr)))
290 ip_match = true;
291
292 port = ntohs(sock_in6->sin6_port);
293 } else {
294 sock_in = (struct sockaddr_in *)sockaddr;
295 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
296
297 if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
298 ip_match = true;
299
300 port = ntohs(sock_in->sin_port);
301 }
302
303         if (ip_match && (np->np_port == port) &&
304 (np->np_network_transport == network_transport))
305 return true;
306
307 return false;
308 }
309
310 static struct iscsi_np *iscsit_get_np(
311 struct __kernel_sockaddr_storage *sockaddr,
312 int network_transport)
313 {
314 struct iscsi_np *np;
315 bool match;
316
317 spin_lock_bh(&np_lock);
318 list_for_each_entry(np, &g_np_list, np_list) {
319 spin_lock(&np->np_thread_lock);
320 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
321 spin_unlock(&np->np_thread_lock);
322 continue;
323 }
324
325 match = iscsit_check_np_match(sockaddr, np, network_transport);
326                 if (match) {
327 /*
328 * Increment the np_exports reference count now to
329 * prevent iscsit_del_np() below from being called
330 * while iscsi_tpg_add_network_portal() is called.
331 */
332 np->np_exports++;
333 spin_unlock(&np->np_thread_lock);
334 spin_unlock_bh(&np_lock);
335 return np;
336 }
337 spin_unlock(&np->np_thread_lock);
338 }
339 spin_unlock_bh(&np_lock);
340
341 return NULL;
342 }
343
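/*
 * Return an existing active struct iscsi_np for this address/transport
 * pair, or allocate a new one, set up its login socket and start the
 * iscsi_np login thread.
 */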
344 struct iscsi_np *iscsit_add_np(
345 struct __kernel_sockaddr_storage *sockaddr,
346 char *ip_str,
347 int network_transport)
348 {
349 struct sockaddr_in *sock_in;
350 struct sockaddr_in6 *sock_in6;
351 struct iscsi_np *np;
352 int ret;
353 /*
354 * Locate the existing struct iscsi_np if already active..
355 */
356 np = iscsit_get_np(sockaddr, network_transport);
357 if (np)
358 return np;
359
360 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
361 if (!np) {
362 pr_err("Unable to allocate memory for struct iscsi_np\n");
363 return ERR_PTR(-ENOMEM);
364 }
365
366 np->np_flags |= NPF_IP_NETWORK;
367 if (sockaddr->ss_family == AF_INET6) {
368 sock_in6 = (struct sockaddr_in6 *)sockaddr;
369 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
370 np->np_port = ntohs(sock_in6->sin6_port);
371 } else {
372 sock_in = (struct sockaddr_in *)sockaddr;
373 sprintf(np->np_ip, "%s", ip_str);
374 np->np_port = ntohs(sock_in->sin_port);
375 }
376
377 np->np_network_transport = network_transport;
378 spin_lock_init(&np->np_thread_lock);
379 init_completion(&np->np_restart_comp);
380 INIT_LIST_HEAD(&np->np_list);
381
382 ret = iscsi_target_setup_login_socket(np, sockaddr);
383 if (ret != 0) {
384 kfree(np);
385 return ERR_PTR(ret);
386 }
387
388 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
389 if (IS_ERR(np->np_thread)) {
390 pr_err("Unable to create kthread: iscsi_np\n");
391 ret = PTR_ERR(np->np_thread);
392 kfree(np);
393 return ERR_PTR(ret);
394 }
395 /*
396 * Increment the np_exports reference count now to prevent
397 * iscsit_del_np() below from being run while a new call to
398 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
399 * active. We don't need to hold np->np_thread_lock at this
400 * point because iscsi_np has not been added to g_np_list yet.
401 */
402 np->np_exports = 1;
403
404 spin_lock_bh(&np_lock);
405 list_add_tail(&np->np_list, &g_np_list);
406 spin_unlock_bh(&np_lock);
407
408 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
409 np->np_ip, np->np_port, np->np_transport->name);
410
411 return np;
412 }
413
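/*
 * Signal the np login thread to reset itself and wait for it to complete
 * np_restart_comp.  On shutdown, also drop the tpg_np reference and wait
 * for the final put.
 */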
414 int iscsit_reset_np_thread(
415 struct iscsi_np *np,
416 struct iscsi_tpg_np *tpg_np,
417 struct iscsi_portal_group *tpg,
418 bool shutdown)
419 {
420 spin_lock_bh(&np->np_thread_lock);
421 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
422 spin_unlock_bh(&np->np_thread_lock);
423 return 0;
424 }
425 np->np_thread_state = ISCSI_NP_THREAD_RESET;
426
427 if (np->np_thread) {
428 spin_unlock_bh(&np->np_thread_lock);
429 send_sig(SIGINT, np->np_thread, 1);
430 wait_for_completion(&np->np_restart_comp);
431 spin_lock_bh(&np->np_thread_lock);
432 }
433 spin_unlock_bh(&np->np_thread_lock);
434
435 if (tpg_np && shutdown) {
436 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
437
438 wait_for_completion(&tpg_np->tpg_np_comp);
439 }
440
441 return 0;
442 }
443
444 static void iscsit_free_np(struct iscsi_np *np)
445 {
446 if (np->np_socket)
447 sock_release(np->np_socket);
448 }
449
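/*
 * Drop one np_exports reference.  When the last reference goes away, stop
 * the login thread, release the socket via the transport and free the np.
 */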
450 int iscsit_del_np(struct iscsi_np *np)
451 {
452 spin_lock_bh(&np->np_thread_lock);
453 np->np_exports--;
454 if (np->np_exports) {
455 spin_unlock_bh(&np->np_thread_lock);
456 return 0;
457 }
458 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
459 spin_unlock_bh(&np->np_thread_lock);
460
461 if (np->np_thread) {
462 /*
463                  * We need to send the signal to wake up Linux/Net
464                  * which may be sleeping in sock_accept().
465 */
466 send_sig(SIGINT, np->np_thread, 1);
467 kthread_stop(np->np_thread);
468 }
469
470 np->np_transport->iscsit_free_np(np);
471
472 spin_lock_bh(&np_lock);
473 list_del(&np->np_list);
474 spin_unlock_bh(&np_lock);
475
476 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
477 np->np_ip, np->np_port, np->np_transport->name);
478
479 iscsit_put_transport(np->np_transport);
480 kfree(np);
481 return 0;
482 }
483
484 static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
485 static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
486
487 static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
488 {
489 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
490 return 0;
491 }
492
493 static struct iscsit_transport iscsi_target_transport = {
494 .name = "iSCSI/TCP",
495 .transport_type = ISCSI_TCP,
496 .owner = NULL,
497 .iscsit_setup_np = iscsit_setup_np,
498 .iscsit_accept_np = iscsit_accept_np,
499 .iscsit_free_np = iscsit_free_np,
500 .iscsit_get_login_rx = iscsit_get_login_rx,
501 .iscsit_put_login_tx = iscsit_put_login_tx,
502 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
503 .iscsit_immediate_queue = iscsit_immediate_queue,
504 .iscsit_response_queue = iscsit_response_queue,
505 .iscsit_queue_data_in = iscsit_queue_rsp,
506 .iscsit_queue_status = iscsit_queue_rsp,
507 };
508
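/*
 * Module init: allocate iscsit_global, register configfs support, bring up
 * the RX/TX thread sets, create the per-PDU kmem caches, register the
 * built-in iSCSI/TCP transport and load the discovery TPG.
 */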
509 static int __init iscsi_target_init_module(void)
510 {
511 int ret = 0;
512
513 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
514
515 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
516 if (!iscsit_global) {
517 pr_err("Unable to allocate memory for iscsit_global\n");
518 return -1;
519 }
520 mutex_init(&auth_id_lock);
521 spin_lock_init(&sess_idr_lock);
522 idr_init(&tiqn_idr);
523 idr_init(&sess_idr);
524
525 ret = iscsi_target_register_configfs();
526 if (ret < 0)
527 goto out;
528
529 ret = iscsi_thread_set_init();
530 if (ret < 0)
531 goto configfs_out;
532
533 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
534 TARGET_THREAD_SET_COUNT) {
535 pr_err("iscsi_allocate_thread_sets() returned"
536 " unexpected value!\n");
537 goto ts_out1;
538 }
539
540 lio_qr_cache = kmem_cache_create("lio_qr_cache",
541 sizeof(struct iscsi_queue_req),
542 __alignof__(struct iscsi_queue_req), 0, NULL);
543 if (!lio_qr_cache) {
544 pr_err("nable to kmem_cache_create() for"
545 " lio_qr_cache\n");
546 goto ts_out2;
547 }
548
549 lio_dr_cache = kmem_cache_create("lio_dr_cache",
550 sizeof(struct iscsi_datain_req),
551 __alignof__(struct iscsi_datain_req), 0, NULL);
552 if (!lio_dr_cache) {
553 pr_err("Unable to kmem_cache_create() for"
554 " lio_dr_cache\n");
555 goto qr_out;
556 }
557
558 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
559 sizeof(struct iscsi_ooo_cmdsn),
560 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
561 if (!lio_ooo_cache) {
562 pr_err("Unable to kmem_cache_create() for"
563 " lio_ooo_cache\n");
564 goto dr_out;
565 }
566
567 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
568 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
569 0, NULL);
570 if (!lio_r2t_cache) {
571 pr_err("Unable to kmem_cache_create() for"
572 " lio_r2t_cache\n");
573 goto ooo_out;
574 }
575
576 iscsit_register_transport(&iscsi_target_transport);
577
578 if (iscsit_load_discovery_tpg() < 0)
579 goto r2t_out;
580
581 return ret;
582 r2t_out:
583 kmem_cache_destroy(lio_r2t_cache);
584 ooo_out:
585 kmem_cache_destroy(lio_ooo_cache);
586 dr_out:
587 kmem_cache_destroy(lio_dr_cache);
588 qr_out:
589 kmem_cache_destroy(lio_qr_cache);
590 ts_out2:
591 iscsi_deallocate_thread_sets();
592 ts_out1:
593 iscsi_thread_set_free();
594 configfs_out:
595 iscsi_target_deregister_configfs();
596 out:
597 kfree(iscsit_global);
598 return -ENOMEM;
599 }
600
601 static void __exit iscsi_target_cleanup_module(void)
602 {
603 iscsi_deallocate_thread_sets();
604 iscsi_thread_set_free();
605 iscsit_release_discovery_tpg();
606 iscsit_unregister_transport(&iscsi_target_transport);
607 kmem_cache_destroy(lio_qr_cache);
608 kmem_cache_destroy(lio_dr_cache);
609 kmem_cache_destroy(lio_ooo_cache);
610 kmem_cache_destroy(lio_r2t_cache);
611
612 iscsi_target_deregister_configfs();
613
614 kfree(iscsit_global);
615 }
616
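/*
 * Allocate a REJECT response carrying a copy of the offending PDU header
 * and queue it to the response queue.  Always returns -1 so callers can
 * propagate the failure.
 */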
617 static int iscsit_add_reject(
618 struct iscsi_conn *conn,
619 u8 reason,
620 unsigned char *buf)
621 {
622 struct iscsi_cmd *cmd;
623
624 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
625 if (!cmd)
626 return -1;
627
628 cmd->iscsi_opcode = ISCSI_OP_REJECT;
629 cmd->reject_reason = reason;
630
631 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
632 if (!cmd->buf_ptr) {
633 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
634 iscsit_free_cmd(cmd, false);
635 return -1;
636 }
637
638 spin_lock_bh(&conn->cmd_lock);
639 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
640 spin_unlock_bh(&conn->cmd_lock);
641
642 cmd->i_state = ISTATE_SEND_REJECT;
643 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
644
645 return -1;
646 }
647
648 static int iscsit_add_reject_from_cmd(
649 struct iscsi_cmd *cmd,
650 u8 reason,
651 bool add_to_conn,
652 unsigned char *buf)
653 {
654 struct iscsi_conn *conn;
655
656 if (!cmd->conn) {
657 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
658 cmd->init_task_tag);
659 return -1;
660 }
661 conn = cmd->conn;
662
663 cmd->iscsi_opcode = ISCSI_OP_REJECT;
664 cmd->reject_reason = reason;
665
666 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
667 if (!cmd->buf_ptr) {
668 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
669 iscsit_free_cmd(cmd, false);
670 return -1;
671 }
672
673 if (add_to_conn) {
674 spin_lock_bh(&conn->cmd_lock);
675 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
676 spin_unlock_bh(&conn->cmd_lock);
677 }
678
679 cmd->i_state = ISTATE_SEND_REJECT;
680 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
681 /*
682          * Perform the kref_put now if se_cmd has already been set up by
683          * iscsit_setup_scsi_cmd()
684 */
685 if (cmd->se_cmd.se_tfo != NULL) {
686 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
687 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
688 }
689 return -1;
690 }
691
692 static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
693 unsigned char *buf)
694 {
695 return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
696 }
697
698 int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
699 {
700 return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
701 }
702
703 /*
704 * Map some portion of the allocated scatterlist to an iovec, suitable for
705 * kernel sockets to copy data in/out.
706 */
707 static int iscsit_map_iovec(
708 struct iscsi_cmd *cmd,
709 struct kvec *iov,
710 u32 data_offset,
711 u32 data_length)
712 {
713 u32 i = 0;
714 struct scatterlist *sg;
715 unsigned int page_off;
716
717 /*
718 * We know each entry in t_data_sg contains a page.
719 */
720 sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
721 page_off = (data_offset % PAGE_SIZE);
722
723 cmd->first_data_sg = sg;
724 cmd->first_data_sg_off = page_off;
725
726 while (data_length) {
727 u32 cur_len = min_t(u32, data_length, sg->length - page_off);
728
729 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
730 iov[i].iov_len = cur_len;
731
732 data_length -= cur_len;
733 page_off = 0;
734 sg = sg_next(sg);
735 i++;
736 }
737
738 cmd->kmapped_nents = i;
739
740 return i;
741 }
742
743 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
744 {
745 u32 i;
746 struct scatterlist *sg;
747
748 sg = cmd->first_data_sg;
749
750 for (i = 0; i < cmd->kmapped_nents; i++)
751 kunmap(sg_page(&sg[i]));
752 }
753
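/*
 * Release commands whose status has been implicitly acknowledged by the
 * ExpStatSN received from the initiator.
 */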
754 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755 {
756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
758
759 conn->exp_statsn = exp_statsn;
760
761 if (conn->sess->sess_ops->RDMAExtensions)
762 return;
763
764 spin_lock_bh(&conn->cmd_lock);
765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
766 spin_lock(&cmd->istate_lock);
767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
769 cmd->i_state = ISTATE_REMOVE;
770 spin_unlock(&cmd->istate_lock);
771 list_move_tail(&cmd->i_conn_node, &ack_list);
772 continue;
773 }
774 spin_unlock(&cmd->istate_lock);
775 }
776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
782 }
783
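/*
 * Preallocate enough kvec entries to map the entire data buffer page by
 * page, plus ISCSI_IOV_DATA_BUFFER spare slots for padding and digests.
 */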
784 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
785 {
786 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
787
788 iov_count += ISCSI_IOV_DATA_BUFFER;
789
790 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
791 if (!cmd->iov_data) {
792 pr_err("Unable to allocate cmd->iov_data\n");
793 return -ENOMEM;
794 }
795
796 cmd->orig_iov_data_count = iov_count;
797 return 0;
798 }
799
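/*
 * Validate the SCSI Command PDU flags and lengths per RFC 3720, determine
 * the data direction and SAM task attribute, and initialize the backing
 * struct se_cmd before the command is attached to the connection.
 */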
800 int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
801 unsigned char *buf)
802 {
803 int data_direction, payload_length;
804 struct iscsi_scsi_req *hdr;
805 int iscsi_task_attr;
806 int sam_task_attr;
807
808 atomic_long_inc(&conn->sess->cmd_pdus);
809
810 hdr = (struct iscsi_scsi_req *) buf;
811 payload_length = ntoh24(hdr->dlength);
812
813         /* FIXME: Add checks for AdditionalHeaderSegment */
814
815 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
816 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
817 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
818 " not set. Bad iSCSI Initiator.\n");
819 return iscsit_add_reject_cmd(cmd,
820 ISCSI_REASON_BOOKMARK_INVALID, buf);
821 }
822
823 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
824 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
825 /*
826 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
827                  * that adds support for RESERVE/RELEASE. There is a bug in
828                  * this new functionality that sets the R/W bits even when
829                  * neither CDB carries any READ or WRITE data payloads.
830 */
831 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
832 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
833 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
834 goto done;
835 }
836
837 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
838 " set when Expected Data Transfer Length is 0 for"
839 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
840 return iscsit_add_reject_cmd(cmd,
841 ISCSI_REASON_BOOKMARK_INVALID, buf);
842 }
843 done:
844
845 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
846 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
847 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
848 " MUST be set if Expected Data Transfer Length is not 0."
849 " Bad iSCSI Initiator\n");
850 return iscsit_add_reject_cmd(cmd,
851 ISCSI_REASON_BOOKMARK_INVALID, buf);
852 }
853
854 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
855 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
856 pr_err("Bidirectional operations not supported!\n");
857 return iscsit_add_reject_cmd(cmd,
858 ISCSI_REASON_BOOKMARK_INVALID, buf);
859 }
860
861 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
862 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
863 " Scsi Command PDU.\n");
864 return iscsit_add_reject_cmd(cmd,
865 ISCSI_REASON_BOOKMARK_INVALID, buf);
866 }
867
868 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
869 pr_err("ImmediateData=No but DataSegmentLength=%u,"
870 " protocol error.\n", payload_length);
871 return iscsit_add_reject_cmd(cmd,
872 ISCSI_REASON_PROTOCOL_ERROR, buf);
873 }
874
875 if ((be32_to_cpu(hdr->data_length) == payload_length) &&
876 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
877 pr_err("Expected Data Transfer Length and Length of"
878 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
879 " bit is not set protocol error\n");
880 return iscsit_add_reject_cmd(cmd,
881 ISCSI_REASON_PROTOCOL_ERROR, buf);
882 }
883
884 if (payload_length > be32_to_cpu(hdr->data_length)) {
885 pr_err("DataSegmentLength: %u is greater than"
886 " EDTL: %u, protocol error.\n", payload_length,
887                         be32_to_cpu(hdr->data_length));
888 return iscsit_add_reject_cmd(cmd,
889 ISCSI_REASON_PROTOCOL_ERROR, buf);
890 }
891
892 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
893 pr_err("DataSegmentLength: %u is greater than"
894 " MaxXmitDataSegmentLength: %u, protocol error.\n",
895 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
896 return iscsit_add_reject_cmd(cmd,
897 ISCSI_REASON_PROTOCOL_ERROR, buf);
898 }
899
900 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
901 pr_err("DataSegmentLength: %u is greater than"
902 " FirstBurstLength: %u, protocol error.\n",
903 payload_length, conn->sess->sess_ops->FirstBurstLength);
904 return iscsit_add_reject_cmd(cmd,
905 ISCSI_REASON_BOOKMARK_INVALID, buf);
906 }
907
908 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
909 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
910 DMA_NONE;
911
912 cmd->data_direction = data_direction;
913 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
914 /*
915 * Figure out the SAM Task Attribute for the incoming SCSI CDB
916 */
917 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
918 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
919 sam_task_attr = MSG_SIMPLE_TAG;
920 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
921 sam_task_attr = MSG_ORDERED_TAG;
922 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
923 sam_task_attr = MSG_HEAD_TAG;
924 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
925 sam_task_attr = MSG_ACA_TAG;
926 else {
927 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
928 " MSG_SIMPLE_TAG\n", iscsi_task_attr);
929 sam_task_attr = MSG_SIMPLE_TAG;
930 }
931
932 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
933 cmd->i_state = ISTATE_NEW_CMD;
934 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
935 cmd->immediate_data = (payload_length) ? 1 : 0;
936 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
937 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
938 if (cmd->unsolicited_data)
939 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
940
941 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
942 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
943 spin_lock_bh(&conn->sess->ttt_lock);
944 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
945 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
946 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
947 spin_unlock_bh(&conn->sess->ttt_lock);
948 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
949 cmd->targ_xfer_tag = 0xFFFFFFFF;
950 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
951 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
952 cmd->first_burst_len = payload_length;
953
954 if (!conn->sess->sess_ops->RDMAExtensions &&
955 cmd->data_direction == DMA_FROM_DEVICE) {
956 struct iscsi_datain_req *dr;
957
958 dr = iscsit_allocate_datain_req();
959 if (!dr)
960 return iscsit_add_reject_cmd(cmd,
961 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
962
963 iscsit_attach_datain_req(cmd, dr);
964 }
965
966 /*
967 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
968 */
969 transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
970 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
971 cmd->data_direction, sam_task_attr,
972 cmd->sense_buffer + 2);
973
974 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
975 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
976 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
977 conn->cid);
978
979 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
980
981 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
982 scsilun_to_int(&hdr->lun));
983 if (cmd->sense_reason)
984 goto attach_cmd;
985
986 cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
987 if (cmd->sense_reason) {
988 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
989 return iscsit_add_reject_cmd(cmd,
990 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
991 }
992
993 goto attach_cmd;
994 }
995
996 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
997 return iscsit_add_reject_cmd(cmd,
998 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
999 }
1000
1001 attach_cmd:
1002 spin_lock_bh(&conn->cmd_lock);
1003 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1004 spin_unlock_bh(&conn->cmd_lock);
1005 /*
1006 * Check if we need to delay processing because of ALUA
1007 * Active/NonOptimized primary access state..
1008 */
1009 core_alua_check_nonop_delay(&cmd->se_cmd);
1010
1011 return 0;
1012 }
1013 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1014
1015 void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
1016 {
1017 iscsit_set_dataout_sequence_values(cmd);
1018
1019 spin_lock_bh(&cmd->dataout_timeout_lock);
1020 iscsit_start_dataout_timer(cmd, cmd->conn);
1021 spin_unlock_bh(&cmd->dataout_timeout_lock);
1022 }
1023 EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
1024
1025 int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1026 struct iscsi_scsi_req *hdr)
1027 {
1028 int cmdsn_ret = 0;
1029 /*
1030 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1031 * the Immediate Bit is not set, and no Immediate
1032 * Data is attached.
1033 *
1034 * A PDU/CmdSN carrying Immediate Data can only
1035 * be processed after the DataCRC has passed.
1036 * If the DataCRC fails, the CmdSN MUST NOT
1037 * be acknowledged. (See below)
1038 */
1039 if (!cmd->immediate_data) {
1040 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1041 (unsigned char *)hdr, hdr->cmdsn);
1042 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1043 return -1;
1044 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1045 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1046 return 0;
1047 }
1048 }
1049
1050 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1051
1052 /*
1053 * If no Immediate Data is attached, it's OK to return now.
1054 */
1055 if (!cmd->immediate_data) {
1056 if (!cmd->sense_reason && cmd->unsolicited_data)
1057 iscsit_set_unsoliticed_dataout(cmd);
1058 if (!cmd->sense_reason)
1059 return 0;
1060
1061 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1062 return 0;
1063 }
1064
1065 /*
1066 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1067 * execution. These exceptions are processed in CmdSN order using
1068 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1069 */
1070 if (cmd->sense_reason) {
1071 if (cmd->reject_reason)
1072 return 0;
1073
1074 return 1;
1075 }
1076 /*
1077 * Call directly into transport_generic_new_cmd() to perform
1078 * the backend memory allocation.
1079 */
1080 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1081 if (cmd->sense_reason)
1082 return 1;
1083
1084 return 0;
1085 }
1086 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1087
1088 static int
1089 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
1090 bool dump_payload)
1091 {
1092 struct iscsi_conn *conn = cmd->conn;
1093 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1094 /*
1095 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1096 */
1097         if (dump_payload)
1098 goto after_immediate_data;
1099
1100 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1101 cmd->first_burst_len);
1102 after_immediate_data:
1103 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1104 /*
1105 * A PDU/CmdSN carrying Immediate Data passed
1106 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1107 * Immediate Bit is not set.
1108 */
1109 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1110 (unsigned char *)hdr, hdr->cmdsn);
1111 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1112 return -1;
1113
1114 if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1115 int rc;
1116
1117 rc = iscsit_dump_data_payload(cmd->conn,
1118 cmd->first_burst_len, 1);
1119 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1120 return rc;
1121 } else if (cmd->unsolicited_data)
1122 iscsit_set_unsoliticed_dataout(cmd);
1123
1124 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1125 /*
1126 * Immediate Data failed DataCRC and ERL>=1,
1127 * silently drop this PDU and let the initiator
1128 * plug the CmdSN gap.
1129 *
1130 * FIXME: Send Unsolicited NOPIN with reserved
1131 * TTT here to help the initiator figure out
1132 * the missing CmdSN, although they should be
1133 * intelligent enough to determine the missing
1134 * CmdSN and issue a retry to plug the sequence.
1135 */
1136 cmd->i_state = ISTATE_REMOVE;
1137 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1138 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1139 return -1;
1140
1141 return 0;
1142 }
1143
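/*
 * Traditional iSCSI/TCP entry point for a SCSI Command PDU: set up the
 * command, allocate the socket iovecs and receive any Immediate Data.
 */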
1144 static int
1145 iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1146 unsigned char *buf)
1147 {
1148 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1149 int rc, immed_data;
1150 bool dump_payload = false;
1151
1152 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1153 if (rc < 0)
1154 return 0;
1155 /*
1156          * Allocate the iovecs needed for struct socket operations for
1157 * traditional iSCSI block I/O.
1158 */
1159 if (iscsit_allocate_iovecs(cmd) < 0) {
1160 return iscsit_add_reject_cmd(cmd,
1161 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1162 }
1163 immed_data = cmd->immediate_data;
1164
1165 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1166 if (rc < 0)
1167 return rc;
1168 else if (rc > 0)
1169 dump_payload = true;
1170
1171 if (!immed_data)
1172 return 0;
1173
1174 return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1175 }
1176
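/*
 * Feed the previously mapped scatterlist entries, plus any pad bytes,
 * through the connection's hash to compute the DataDigest.
 */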
1177 static u32 iscsit_do_crypto_hash_sg(
1178 struct hash_desc *hash,
1179 struct iscsi_cmd *cmd,
1180 u32 data_offset,
1181 u32 data_length,
1182 u32 padding,
1183 u8 *pad_bytes)
1184 {
1185 u32 data_crc;
1186 u32 i;
1187 struct scatterlist *sg;
1188 unsigned int page_off;
1189
1190 crypto_hash_init(hash);
1191
1192 sg = cmd->first_data_sg;
1193 page_off = cmd->first_data_sg_off;
1194
1195 i = 0;
1196 while (data_length) {
1197 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
1198
1199 crypto_hash_update(hash, &sg[i], cur_len);
1200
1201 data_length -= cur_len;
1202 page_off = 0;
1203 i++;
1204 }
1205
1206 if (padding) {
1207 struct scatterlist pad_sg;
1208
1209 sg_init_one(&pad_sg, pad_bytes, padding);
1210 crypto_hash_update(hash, &pad_sg, padding);
1211 }
1212 crypto_hash_final(hash, (u8 *) &data_crc);
1213
1214 return data_crc;
1215 }
1216
1217 static void iscsit_do_crypto_hash_buf(
1218 struct hash_desc *hash,
1219 const void *buf,
1220 u32 payload_length,
1221 u32 padding,
1222 u8 *pad_bytes,
1223 u8 *data_crc)
1224 {
1225 struct scatterlist sg;
1226
1227 crypto_hash_init(hash);
1228
1229 sg_init_one(&sg, buf, payload_length);
1230 crypto_hash_update(hash, &sg, payload_length);
1231
1232 if (padding) {
1233 sg_init_one(&sg, pad_bytes, padding);
1234 crypto_hash_update(hash, &sg, padding);
1235 }
1236 crypto_hash_final(hash, data_crc);
1237 }
1238
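/*
 * Sanity check an incoming DataOUT header, locate the referenced command
 * and run pre-receive recovery checks.  *out_cmd is only set when the
 * payload should actually be received.
 */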
1239 int
1240 iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1241 struct iscsi_cmd **out_cmd)
1242 {
1243 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1244 struct iscsi_cmd *cmd = NULL;
1245 struct se_cmd *se_cmd;
1246 u32 payload_length = ntoh24(hdr->dlength);
1247 int rc;
1248
1249 if (!payload_length) {
1250 pr_warn("DataOUT payload is ZERO, ignoring.\n");
1251 return 0;
1252 }
1253
1254 /* iSCSI write */
1255 atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1256
1257 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1258 pr_err("DataSegmentLength: %u is greater than"
1259 " MaxXmitDataSegmentLength: %u\n", payload_length,
1260 conn->conn_ops->MaxXmitDataSegmentLength);
1261 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1262 buf);
1263 }
1264
1265 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
1266 payload_length);
1267 if (!cmd)
1268 return 0;
1269
1270 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1271 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1272 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1273 payload_length, conn->cid);
1274
1275 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1276 pr_err("Command ITT: 0x%08x received DataOUT after"
1277 " last DataOUT received, dumping payload\n",
1278 cmd->init_task_tag);
1279 return iscsit_dump_data_payload(conn, payload_length, 1);
1280 }
1281
1282 if (cmd->data_direction != DMA_TO_DEVICE) {
1283 pr_err("Command ITT: 0x%08x received DataOUT for a"
1284 " NON-WRITE command.\n", cmd->init_task_tag);
1285 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
1286 }
1287 se_cmd = &cmd->se_cmd;
1288 iscsit_mod_dataout_timer(cmd);
1289
1290 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1291 pr_err("DataOut Offset: %u, Length %u greater than"
1292 " iSCSI Command EDTL %u, protocol error.\n",
1293                         be32_to_cpu(hdr->offset), payload_length, cmd->se_cmd.data_length);
1294 return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1295 }
1296
1297 if (cmd->unsolicited_data) {
1298 int dump_unsolicited_data = 0;
1299
1300 if (conn->sess->sess_ops->InitialR2T) {
1301 pr_err("Received unexpected unsolicited data"
1302 " while InitialR2T=Yes, protocol error.\n");
1303 transport_send_check_condition_and_sense(&cmd->se_cmd,
1304 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1305 return -1;
1306 }
1307 /*
1308 * Special case for dealing with Unsolicited DataOUT
1309 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1310 * failures;
1311 */
1312
1313 /* Something's amiss if we're not in WRITE_PENDING state... */
1314 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1315 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1316 dump_unsolicited_data = 1;
1317
1318 if (dump_unsolicited_data) {
1319 /*
1320 * Check if a delayed TASK_ABORTED status needs to
1321 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1322                          * received with the unsolicited data out.
1323 */
1324 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1325 iscsit_stop_dataout_timer(cmd);
1326
1327 transport_check_aborted_status(se_cmd,
1328 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1329 return iscsit_dump_data_payload(conn, payload_length, 1);
1330 }
1331 } else {
1332 /*
1333 * For the normal solicited data path:
1334 *
1335 * Check for a delayed TASK_ABORTED status and dump any
1336 * incoming data out payload if one exists. Also, when the
1337 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1338 * data out sequence, we decrement outstanding_r2ts. Once
1339 * outstanding_r2ts reaches zero, go ahead and send the delayed
1340 * TASK_ABORTED status.
1341 */
1342 if (se_cmd->transport_state & CMD_T_ABORTED) {
1343 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1344 if (--cmd->outstanding_r2ts < 1) {
1345 iscsit_stop_dataout_timer(cmd);
1346 transport_check_aborted_status(
1347 se_cmd, 1);
1348 }
1349
1350 return iscsit_dump_data_payload(conn, payload_length, 1);
1351 }
1352 }
1353 /*
1354          * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1355 * within-command recovery checks before receiving the payload.
1356 */
1357 rc = iscsit_check_pre_dataout(cmd, buf);
1358 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1359 return 0;
1360 else if (rc == DATAOUT_CANNOT_RECOVER)
1361 return -1;
1362
1363 *out_cmd = cmd;
1364 return 0;
1365 }
1366 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1367
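/*
 * Receive the DataOUT payload, padding and optional DataDigest directly
 * into the command's mapped scatterlist.  Returns 1 on digest failure,
 * -1 on a receive error, and 0 otherwise.
 */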
1368 static int
1369 iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1370 struct iscsi_data *hdr)
1371 {
1372 struct kvec *iov;
1373 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1374 u32 payload_length = ntoh24(hdr->dlength);
1375 int iov_ret, data_crc_failed = 0;
1376
1377 rx_size += payload_length;
1378 iov = &cmd->iov_data[0];
1379
1380 iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
1381 payload_length);
1382 if (iov_ret < 0)
1383 return -1;
1384
1385 iov_count += iov_ret;
1386
1387 padding = ((-payload_length) & 3);
1388 if (padding != 0) {
1389 iov[iov_count].iov_base = cmd->pad_bytes;
1390 iov[iov_count++].iov_len = padding;
1391 rx_size += padding;
1392 pr_debug("Receiving %u padding bytes.\n", padding);
1393 }
1394
1395 if (conn->conn_ops->DataDigest) {
1396 iov[iov_count].iov_base = &checksum;
1397 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1398 rx_size += ISCSI_CRC_LEN;
1399 }
1400
1401 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1402
1403 iscsit_unmap_iovec(cmd);
1404
1405 if (rx_got != rx_size)
1406 return -1;
1407
1408 if (conn->conn_ops->DataDigest) {
1409 u32 data_crc;
1410
1411 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
1412 be32_to_cpu(hdr->offset),
1413 payload_length, padding,
1414 cmd->pad_bytes);
1415
1416 if (checksum != data_crc) {
1417 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1418 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1419 " does not match computed 0x%08x\n",
1420                                 hdr->itt, be32_to_cpu(hdr->offset), payload_length,
1421 hdr->datasn, checksum, data_crc);
1422 data_crc_failed = 1;
1423 } else {
1424 pr_debug("Got CRC32C DataDigest 0x%08x for"
1425 " %u bytes of Data Out\n", checksum,
1426 payload_length);
1427 }
1428 }
1429
1430 return data_crc_failed;
1431 }
1432
1433 int
1434 iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
1435 bool data_crc_failed)
1436 {
1437 struct iscsi_conn *conn = cmd->conn;
1438 int rc, ooo_cmdsn;
1439 /*
1440 * Increment post receive data and CRC values or perform
1441 * within-command recovery.
1442 */
1443 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1444 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1445 return 0;
1446 else if (rc == DATAOUT_SEND_R2T) {
1447 iscsit_set_dataout_sequence_values(cmd);
1448 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1449 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1450 /*
1451 * Handle extra special case for out of order
1452 * Unsolicited Data Out.
1453 */
1454 spin_lock_bh(&cmd->istate_lock);
1455 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1456 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1457 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1458 spin_unlock_bh(&cmd->istate_lock);
1459
1460 iscsit_stop_dataout_timer(cmd);
1461 if (ooo_cmdsn)
1462 return 0;
1463 target_execute_cmd(&cmd->se_cmd);
1464 return 0;
1465 } else /* DATAOUT_CANNOT_RECOVER */
1466 return -1;
1467
1468 return 0;
1469 }
1470 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1471
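/*
 * Traditional iSCSI/TCP DataOUT entry point: validate the header, receive
 * the payload and run the post-receive DataOUT checks.
 */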
1472 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1473 {
1474 struct iscsi_cmd *cmd = NULL;
1475 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1476 int rc;
1477 bool data_crc_failed = false;
1478
1479 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1480 if (rc < 0)
1481 return 0;
1482 else if (!cmd)
1483 return 0;
1484
1485 rc = iscsit_get_dataout(conn, cmd, hdr);
1486 if (rc < 0)
1487 return rc;
1488 else if (rc > 0)
1489 data_crc_failed = true;
1490
1491 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1492 }
1493
1494 int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1495 struct iscsi_nopout *hdr)
1496 {
1497 u32 payload_length = ntoh24(hdr->dlength);
1498
1499 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1500 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1501 " not set, protocol error.\n");
1502 if (!cmd)
1503 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1504 (unsigned char *)hdr);
1505
1506 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1507 (unsigned char *)hdr);
1508 }
1509
1510 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1511 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1512 " greater than MaxXmitDataSegmentLength: %u, protocol"
1513 " error.\n", payload_length,
1514 conn->conn_ops->MaxXmitDataSegmentLength);
1515 if (!cmd)
1516 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1517 (unsigned char *)hdr);
1518
1519 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1520 (unsigned char *)hdr);
1521 }
1522
1523 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1524 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1525 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1526 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1527 payload_length);
1528 /*
1529          * This is not a response to an Unsolicited NopIN, which means
1530 * it can either be a NOPOUT ping request (with a valid ITT),
1531 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1532          * Either way, make sure we allocate a struct iscsi_cmd, as both
1533 * can contain ping data.
1534 */
1535 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1536 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1537 cmd->i_state = ISTATE_SEND_NOPIN;
1538 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1539 1 : 0);
1540 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1541 cmd->targ_xfer_tag = 0xFFFFFFFF;
1542 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1543 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1544 cmd->data_direction = DMA_NONE;
1545 }
1546
1547 return 0;
1548 }
1549 EXPORT_SYMBOL(iscsit_setup_nop_out);
1550
1551 int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1552 struct iscsi_nopout *hdr)
1553 {
1554 struct iscsi_cmd *cmd_p = NULL;
1555 int cmdsn_ret = 0;
1556 /*
1557 * Initiator is expecting a NopIN ping reply..
1558 */
1559 if (hdr->itt != RESERVED_ITT) {
1560 BUG_ON(!cmd);
1561
1562 spin_lock_bh(&conn->cmd_lock);
1563 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1564 spin_unlock_bh(&conn->cmd_lock);
1565
1566 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1567
1568 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1569 iscsit_add_cmd_to_response_queue(cmd, conn,
1570 cmd->i_state);
1571 return 0;
1572 }
1573
1574 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1575 (unsigned char *)hdr, hdr->cmdsn);
1576 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1577 return 0;
1578 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1579 return -1;
1580
1581 return 0;
1582 }
1583 /*
1584          * This was a response to an unsolicited NOPIN ping.
1585 */
1586 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1587 cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1588 if (!cmd_p)
1589 return -EINVAL;
1590
1591 iscsit_stop_nopin_response_timer(conn);
1592
1593 cmd_p->i_state = ISTATE_REMOVE;
1594 iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1595
1596 iscsit_start_nopin_timer(conn);
1597 return 0;
1598 }
1599 /*
1600          * Otherwise, the initiator is not expecting a NOPIN in response.
1601          * Just ignore it for now.
1602 */
1603 return 0;
1604 }
1605 EXPORT_SYMBOL(iscsit_process_nop_out);
1606
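/*
 * Traditional iSCSI/TCP NOP-OUT entry point: receive any ping data with
 * padding and digest handling, then process the PDU.
 */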
1607 static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1608 unsigned char *buf)
1609 {
1610 unsigned char *ping_data = NULL;
1611 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1612 struct kvec *iov = NULL;
1613 u32 payload_length = ntoh24(hdr->dlength);
1614 int ret;
1615
1616 ret = iscsit_setup_nop_out(conn, cmd, hdr);
1617 if (ret < 0)
1618 return 0;
1619 /*
1620 * Handle NOP-OUT payload for traditional iSCSI sockets
1621 */
1622 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1623 u32 checksum, data_crc, padding = 0;
1624 int niov = 0, rx_got, rx_size = payload_length;
1625
1626 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1627 if (!ping_data) {
1628 pr_err("Unable to allocate memory for"
1629 " NOPOUT ping data.\n");
1630 ret = -1;
1631 goto out;
1632 }
1633
1634 iov = &cmd->iov_misc[0];
1635 iov[niov].iov_base = ping_data;
1636 iov[niov++].iov_len = payload_length;
1637
1638 padding = ((-payload_length) & 3);
1639 if (padding != 0) {
1640 pr_debug("Receiving %u additional bytes"
1641 " for padding.\n", padding);
1642 iov[niov].iov_base = &cmd->pad_bytes;
1643 iov[niov++].iov_len = padding;
1644 rx_size += padding;
1645 }
1646 if (conn->conn_ops->DataDigest) {
1647 iov[niov].iov_base = &checksum;
1648 iov[niov++].iov_len = ISCSI_CRC_LEN;
1649 rx_size += ISCSI_CRC_LEN;
1650 }
1651
1652 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1653 if (rx_got != rx_size) {
1654 ret = -1;
1655 goto out;
1656 }
1657
1658 if (conn->conn_ops->DataDigest) {
1659 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1660 ping_data, payload_length,
1661 padding, cmd->pad_bytes,
1662 (u8 *)&data_crc);
1663
1664 if (checksum != data_crc) {
1665 pr_err("Ping data CRC32C DataDigest"
1666 " 0x%08x does not match computed 0x%08x\n",
1667 checksum, data_crc);
1668 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1669 pr_err("Unable to recover from"
1670 " NOPOUT Ping DataCRC failure while in"
1671 " ERL=0.\n");
1672 ret = -1;
1673 goto out;
1674 } else {
1675 /*
1676 * Silently drop this PDU and let the
1677 * initiator plug the CmdSN gap.
1678 */
1679 pr_debug("Dropping NOPOUT"
1680 " Command CmdSN: 0x%08x due to"
1681 " DataCRC error.\n", hdr->cmdsn);
1682 ret = 0;
1683 goto out;
1684 }
1685 } else {
1686 pr_debug("Got CRC32C DataDigest"
1687 " 0x%08x for %u bytes of ping data.\n",
1688 checksum, payload_length);
1689 }
1690 }
1691
1692 ping_data[payload_length] = '\0';
1693 /*
1694 * Attach ping data to struct iscsi_cmd->buf_ptr.
1695 */
1696 cmd->buf_ptr = ping_data;
1697 cmd->buf_ptr_size = payload_length;
1698
1699 pr_debug("Got %u bytes of NOPOUT ping"
1700 " data.\n", payload_length);
1701 pr_debug("Ping Data: \"%s\"\n", ping_data);
1702 }
1703
1704 return iscsit_process_nop_out(conn, cmd, hdr);
1705 out:
1706 if (cmd)
1707 iscsit_free_cmd(cmd, false);
1708
1709 kfree(ping_data);
1710 return ret;
1711 }
1712
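/*
 * Decode a Task Management Request, map the iSCSI TMR function to its TCM
 * equivalent, perform function specific checks and either hand the TMR to
 * the transport or queue the response directly.
 */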
1713 int
1714 iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1715 unsigned char *buf)
1716 {
1717 struct se_tmr_req *se_tmr;
1718 struct iscsi_tmr_req *tmr_req;
1719 struct iscsi_tm *hdr;
1720 int out_of_order_cmdsn = 0, ret;
1721 bool sess_ref = false;
1722 u8 function;
1723
1724 hdr = (struct iscsi_tm *) buf;
1725 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1726 function = hdr->flags;
1727
1728 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
1729 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
1730 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
1731 hdr->rtt, hdr->refcmdsn, conn->cid);
1732
1733 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1734 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1735 hdr->rtt != RESERVED_ITT)) {
1736 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1737 hdr->rtt = RESERVED_ITT;
1738 }
1739
1740 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
1741 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1742 pr_err("Task Management Request TASK_REASSIGN not"
1743 " issued as immediate command, bad iSCSI Initiator"
1744 "implementation\n");
1745 return iscsit_add_reject_cmd(cmd,
1746 ISCSI_REASON_PROTOCOL_ERROR, buf);
1747 }
1748 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1749 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
1750 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
1751
1752 cmd->data_direction = DMA_NONE;
1753
1754 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
1755 if (!cmd->tmr_req) {
1756 pr_err("Unable to allocate memory for"
1757 " Task Management command!\n");
1758 return iscsit_add_reject_cmd(cmd,
1759 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1760 buf);
1761 }
1762
1763 /*
1764 * TASK_REASSIGN for ERL=2 / connection stays inside of
1765 * LIO-Target $FABRIC_MOD
1766 */
1767 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1768
1769 u8 tcm_function;
1770 int ret;
1771
1772 transport_init_se_cmd(&cmd->se_cmd,
1773 &lio_target_fabric_configfs->tf_ops,
1774 conn->sess->se_sess, 0, DMA_NONE,
1775 MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
1776
1777 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
1778 sess_ref = true;
1779
1780 switch (function) {
1781 case ISCSI_TM_FUNC_ABORT_TASK:
1782 tcm_function = TMR_ABORT_TASK;
1783 break;
1784 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1785 tcm_function = TMR_ABORT_TASK_SET;
1786 break;
1787 case ISCSI_TM_FUNC_CLEAR_ACA:
1788 tcm_function = TMR_CLEAR_ACA;
1789 break;
1790 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1791 tcm_function = TMR_CLEAR_TASK_SET;
1792 break;
1793 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1794 tcm_function = TMR_LUN_RESET;
1795 break;
1796 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1797 tcm_function = TMR_TARGET_WARM_RESET;
1798 break;
1799 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1800 tcm_function = TMR_TARGET_COLD_RESET;
1801 break;
1802 default:
1803 pr_err("Unknown iSCSI TMR Function:"
1804 " 0x%02x\n", function);
1805 return iscsit_add_reject_cmd(cmd,
1806 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1807 }
1808
1809 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
1810 tcm_function, GFP_KERNEL);
1811 if (ret < 0)
1812 return iscsit_add_reject_cmd(cmd,
1813 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1814
1815 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
1816 }
1817
1818 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
1819 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1820 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1821 cmd->init_task_tag = hdr->itt;
1822 cmd->targ_xfer_tag = 0xFFFFFFFF;
1823 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1824 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1825 se_tmr = cmd->se_cmd.se_tmr_req;
1826 tmr_req = cmd->tmr_req;
1827 /*
1828 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
1829 */
1830 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1831 ret = transport_lookup_tmr_lun(&cmd->se_cmd,
1832 scsilun_to_int(&hdr->lun));
1833 if (ret < 0) {
1834 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
1835 goto attach;
1836 }
1837 }
1838
1839 switch (function) {
1840 case ISCSI_TM_FUNC_ABORT_TASK:
1841 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
1842 if (se_tmr->response)
1843 goto attach;
1844 break;
1845 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1846 case ISCSI_TM_FUNC_CLEAR_ACA:
1847 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1848 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1849 break;
1850 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1851 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
1852 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1853 goto attach;
1854 }
1855 break;
1856 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1857 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
1858 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1859 goto attach;
1860 }
1861 break;
1862 case ISCSI_TM_FUNC_TASK_REASSIGN:
1863 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
1864 /*
1865 * Perform sanity checks on the ExpDataSN only if the
1866 * TASK_REASSIGN was successful.
1867 */
1868 if (se_tmr->response)
1869 break;
1870
1871 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
1872 return iscsit_add_reject_cmd(cmd,
1873 ISCSI_REASON_BOOKMARK_INVALID, buf);
1874 break;
1875 default:
1876 pr_err("Unknown TMR function: 0x%02x, protocol"
1877 " error.\n", function);
1878 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
1879 goto attach;
1880 }
1881
1882 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1883 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
1884 se_tmr->call_transport = 1;
1885 attach:
1886 spin_lock_bh(&conn->cmd_lock);
1887 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1888 spin_unlock_bh(&conn->cmd_lock);
1889
1890 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1891 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1892 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
1893 out_of_order_cmdsn = 1;
1894 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1895 return 0;
1896 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1897 return -1;
1898 }
1899 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1900
1901 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
1902 return 0;
1903 /*
1904 * Found the referenced task, send to transport for processing.
1905 */
1906 if (se_tmr->call_transport)
1907 return transport_generic_handle_tmr(&cmd->se_cmd);
1908
1909 /*
1910 * Could not find the referenced LUN or task, or the Task Management
1911 * command was not authorized or supported. Change state and
1912 * let the tx_thread send the response.
1913 *
1914 * For connection recovery, this is also the default action for
1915 * TMR TASK_REASSIGN.
1916 */
1917 if (sess_ref) {
1918 pr_debug("Handle TMR, using sess_ref=true check\n");
1919 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1920 }
1921
1922 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1923 return 0;
1924 }
1925 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
1926
1927 /* #warning FIXME: Support Text Command parameters besides SendTargets */
1928 int
1929 iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1930 struct iscsi_text *hdr)
1931 {
1932 u32 payload_length = ntoh24(hdr->dlength);
1933
1934 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1935 pr_err("Unable to accept text parameter length: %u"
1936 "greater than MaxXmitDataSegmentLength %u.\n",
1937 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1938 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1939 (unsigned char *)hdr);
1940 }
1941
1942 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
1943 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
1944 pr_err("Multi sequence text commands currently not supported\n");
1945 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
1946 (unsigned char *)hdr);
1947 }
1948
1949 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1950 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1951 hdr->exp_statsn, payload_length);
1952
1953 cmd->iscsi_opcode = ISCSI_OP_TEXT;
1954 cmd->i_state = ISTATE_SEND_TEXTRSP;
1955 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1956 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1957 cmd->targ_xfer_tag = 0xFFFFFFFF;
1958 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1959 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1960 cmd->data_direction = DMA_NONE;
1961
1962 return 0;
1963 }
1964 EXPORT_SYMBOL(iscsit_setup_text_cmd);
1965
1966 int
1967 iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1968 struct iscsi_text *hdr)
1969 {
1970 unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
1971 int cmdsn_ret;
1972
1973 if (!text_in) {
1974 pr_err("Unable to locate text_in buffer for sendtargets"
1975 " discovery\n");
1976 goto reject;
1977 }
1978 if (strncmp("SendTargets", text_in, 11) != 0) {
1979 pr_err("Received Text Data that is not"
1980 " SendTargets, cannot continue.\n");
1981 goto reject;
1982 }
1983 text_ptr = strchr(text_in, '=');
1984 if (!text_ptr) {
1985 pr_err("No \"=\" separator found in Text Data,"
1986 " cannot continue.\n");
1987 goto reject;
1988 }
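/*
 * Per RFC 3720, SendTargets=All returns every accessible TargetName,
 * while SendTargets=<iqn.|eui. name> restricts the response to that
 * single target node.
 */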
1989 if (!strncmp("=All", text_ptr, 4)) {
1990 cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
1991 } else if (!strncmp("=iqn.", text_ptr, 5) ||
1992 !strncmp("=eui.", text_ptr, 5)) {
1993 cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
1994 } else {
1995 pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
1996 goto reject;
1997 }
1998
1999 spin_lock_bh(&conn->cmd_lock);
2000 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2001 spin_unlock_bh(&conn->cmd_lock);
2002
2003 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2004
2005 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2006 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2007 (unsigned char *)hdr, hdr->cmdsn);
2008 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2009 return -1;
2010
2011 return 0;
2012 }
2013
2014 return iscsit_execute_cmd(cmd, 0);
2015
2016 reject:
2017 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2018 (unsigned char *)hdr);
2019 }
2020 EXPORT_SYMBOL(iscsit_process_text_cmd);
2021
2022 static int
2023 iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2024 unsigned char *buf)
2025 {
2026 struct iscsi_text *hdr = (struct iscsi_text *)buf;
2027 char *text_in = NULL;
2028 u32 payload_length = ntoh24(hdr->dlength);
2029 int rx_size, rc;
2030
2031 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2032 if (rc < 0)
2033 return 0;
2034
2035 rx_size = payload_length;
2036 if (payload_length) {
2037 u32 checksum = 0, data_crc = 0;
2038 u32 padding = 0, pad_bytes = 0;
2039 int niov = 0, rx_got;
2040 struct kvec iov[3];
2041
2042 text_in = kzalloc(payload_length, GFP_KERNEL);
2043 if (!text_in) {
2044 pr_err("Unable to allocate memory for"
2045 " incoming text parameters\n");
2046 goto reject;
2047 }
2048 cmd->text_in_ptr = text_in;
2049
2050 memset(iov, 0, 3 * sizeof(struct kvec));
2051 iov[niov].iov_base = text_in;
2052 iov[niov++].iov_len = payload_length;
2053
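/*
 * iSCSI data segments are padded to a 4-byte boundary; ((-len) & 3)
 * yields the number of pad bytes needed to reach the next multiple
 * of four.
 */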
2054 padding = ((-payload_length) & 3);
2055 if (padding != 0) {
2056 iov[niov].iov_base = &pad_bytes;
2057 iov[niov++].iov_len = padding;
2058 rx_size += padding;
2059 pr_debug("Receiving %u additional bytes"
2060 " for padding.\n", padding);
2061 }
2062 if (conn->conn_ops->DataDigest) {
2063 iov[niov].iov_base = &checksum;
2064 iov[niov++].iov_len = ISCSI_CRC_LEN;
2065 rx_size += ISCSI_CRC_LEN;
2066 }
2067
2068 rx_got = rx_data(conn, &iov[0], niov, rx_size);
2069 if (rx_got != rx_size)
2070 goto reject;
2071
2072 if (conn->conn_ops->DataDigest) {
2073 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
2074 text_in, payload_length,
2075 padding, (u8 *)&pad_bytes,
2076 (u8 *)&data_crc);
2077
2078 if (checksum != data_crc) {
2079 pr_err("Text data CRC32C DataDigest"
2080 " 0x%08x does not match computed"
2081 " 0x%08x\n", checksum, data_crc);
2082 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2083 pr_err("Unable to recover from"
2084 " Text Data digest failure while in"
2085 " ERL=0.\n");
2086 goto reject;
2087 } else {
2088 /*
2089 * Silently drop this PDU and let the
2090 * initiator plug the CmdSN gap.
2091 */
2092 pr_debug("Dropping Text"
2093 " Command CmdSN: 0x%08x due to"
2094 " DataCRC error.\n", hdr->cmdsn);
2095 kfree(text_in);
2096 return 0;
2097 }
2098 } else {
2099 pr_debug("Got CRC32C DataDigest"
2100 " 0x%08x for %u bytes of text data.\n",
2101 checksum, payload_length);
2102 }
2103 }
2104 text_in[payload_length - 1] = '\0';
2105 pr_debug("Successfully read %d bytes of text"
2106 " data.\n", payload_length);
2107 }
2108
2109 return iscsit_process_text_cmd(conn, cmd, hdr);
2110
2111 reject:
2112 kfree(cmd->text_in_ptr);
2113 cmd->text_in_ptr = NULL;
2114 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2115 }
2116 EXPORT_SYMBOL(iscsit_handle_text_cmd);
2117
2118 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2119 {
2120 struct iscsi_conn *conn_p;
2121 struct iscsi_session *sess = conn->sess;
2122
2123 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2124 " for SID: %u.\n", conn->cid, conn->sess->sid);
2125
2126 atomic_set(&sess->session_logout, 1);
2127 atomic_set(&conn->conn_logout_remove, 1);
2128 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2129
2130 iscsit_inc_conn_usage_count(conn);
2131 iscsit_inc_session_usage_count(sess);
2132
2133 spin_lock_bh(&sess->conn_lock);
2134 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2135 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2136 continue;
2137
2138 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2139 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2140 }
2141 spin_unlock_bh(&sess->conn_lock);
2142
2143 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2144
2145 return 0;
2146 }
2147
2148 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2149 {
2150 struct iscsi_conn *l_conn;
2151 struct iscsi_session *sess = conn->sess;
2152
2153 pr_debug("Received logout request CLOSECONNECTION for CID:"
2154 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2155
2156 /*
2157 * A Logout Request with a CLOSECONNECTION reason code for a CID
2158 * can arrive on a connection with a differing CID.
2159 */
2160 if (conn->cid == cmd->logout_cid) {
2161 spin_lock_bh(&conn->state_lock);
2162 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2163 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2164
2165 atomic_set(&conn->conn_logout_remove, 1);
2166 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2167 iscsit_inc_conn_usage_count(conn);
2168
2169 spin_unlock_bh(&conn->state_lock);
2170 } else {
2171 /*
2172 * Handle all different cid CLOSECONNECTION requests in
2173 * iscsit_logout_post_handler_diffcid() so as to give enough
2174 * time for any non-immediate command's CmdSN to be
2175 * acknowledged on the connection in question.
2176 *
2177 * Here we simply make sure the CID is still around.
2178 */
2179 l_conn = iscsit_get_conn_from_cid(sess,
2180 cmd->logout_cid);
2181 if (!l_conn) {
2182 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2183 iscsit_add_cmd_to_response_queue(cmd, conn,
2184 cmd->i_state);
2185 return 0;
2186 }
2187
2188 iscsit_dec_conn_usage_count(l_conn);
2189 }
2190
2191 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2192
2193 return 0;
2194 }
2195
2196 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2197 {
2198 struct iscsi_session *sess = conn->sess;
2199
2200 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2201 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2202
2203 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2204 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2205 " while ERL!=2.\n");
2206 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2207 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2208 return 0;
2209 }
2210
2211 if (conn->cid == cmd->logout_cid) {
2212 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2213 " with CID: %hu on CID: %hu, implementation error.\n",
2214 cmd->logout_cid, conn->cid);
2215 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2216 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2217 return 0;
2218 }
2219
2220 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2221
2222 return 0;
2223 }
2224
2225 int
2226 iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2227 unsigned char *buf)
2228 {
2229 int cmdsn_ret, logout_remove = 0;
2230 u8 reason_code = 0;
2231 struct iscsi_logout *hdr;
2232 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2233
2234 hdr = (struct iscsi_logout *) buf;
2235 reason_code = (hdr->flags & 0x7f);
2236
2237 if (tiqn) {
2238 spin_lock(&tiqn->logout_stats.lock);
2239 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2240 tiqn->logout_stats.normal_logouts++;
2241 else
2242 tiqn->logout_stats.abnormal_logouts++;
2243 spin_unlock(&tiqn->logout_stats.lock);
2244 }
2245
2246 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2247 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2248 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2249 hdr->cid, conn->cid);
2250
2251 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2252 pr_err("Received logout request on connection that"
2253 " is not in logged in state, ignoring request.\n");
2254 iscsit_free_cmd(cmd, false);
2255 return 0;
2256 }
2257
2258 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2259 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2260 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2261 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2262 cmd->targ_xfer_tag = 0xFFFFFFFF;
2263 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2264 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2265 cmd->logout_cid = be16_to_cpu(hdr->cid);
2266 cmd->logout_reason = reason_code;
2267 cmd->data_direction = DMA_NONE;
2268
2269 /*
2270 * We need to sleep in these cases (by returning 1) until the Logout
2271 * Response gets sent in the tx thread.
2272 */
2273 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2274 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2275 be16_to_cpu(hdr->cid) == conn->cid))
2276 logout_remove = 1;
2277
2278 spin_lock_bh(&conn->cmd_lock);
2279 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2280 spin_unlock_bh(&conn->cmd_lock);
2281
2282 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2283 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2284
2285 /*
2286 * Immediate commands are executed, well, immediately.
2287 * Non-Immediate Logout Commands are executed in CmdSN order.
2288 */
2289 if (cmd->immediate_cmd) {
2290 int ret = iscsit_execute_cmd(cmd, 0);
2291
2292 if (ret < 0)
2293 return ret;
2294 } else {
2295 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2296 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2297 logout_remove = 0;
2298 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2299 return -1;
2300 }
2301
2302 return logout_remove;
2303 }
2304 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2305
2306 static int iscsit_handle_snack(
2307 struct iscsi_conn *conn,
2308 unsigned char *buf)
2309 {
2310 struct iscsi_snack *hdr;
2311
2312 hdr = (struct iscsi_snack *) buf;
2313 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2314
2315 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2316 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2317 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2318 hdr->begrun, hdr->runlength, conn->cid);
2319
2320 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2321 pr_err("Initiator sent SNACK request while in"
2322 " ErrorRecoveryLevel=0.\n");
2323 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2324 buf);
2325 }
2326 /*
2327 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2328 * call from inside iscsi_send_recovery_datain_or_r2t().
2329 */
2330 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2331 case 0:
2332 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2333 hdr->itt,
2334 be32_to_cpu(hdr->ttt),
2335 be32_to_cpu(hdr->begrun),
2336 be32_to_cpu(hdr->runlength));
2337 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2338 return iscsit_handle_status_snack(conn, hdr->itt,
2339 be32_to_cpu(hdr->ttt),
2340 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2341 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2342 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2343 be32_to_cpu(hdr->begrun),
2344 be32_to_cpu(hdr->runlength));
2345 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2346 /* FIXME: Support R-Data SNACK */
2347 pr_err("R-Data SNACK Not Supported.\n");
2348 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2349 buf);
2350 default:
2351 pr_err("Unknown SNACK type 0x%02x, protocol"
2352 " error.\n", hdr->flags & 0x0f);
2353 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2354 buf);
2355 }
2356
2357 return 0;
2358 }
2359
2360 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2361 {
2362 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2363 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2364 wait_for_completion_interruptible_timeout(
2365 &conn->rx_half_close_comp,
2366 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2367 }
2368 }
2369
2370 static int iscsit_handle_immediate_data(
2371 struct iscsi_cmd *cmd,
2372 struct iscsi_scsi_req *hdr,
2373 u32 length)
2374 {
2375 int iov_ret, rx_got = 0, rx_size = 0;
2376 u32 checksum, iov_count = 0, padding = 0;
2377 struct iscsi_conn *conn = cmd->conn;
2378 struct kvec *iov;
2379
2380 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
2381 if (iov_ret < 0)
2382 return IMMEDIATE_DATA_CANNOT_RECOVER;
2383
2384 rx_size = length;
2385 iov_count = iov_ret;
2386 iov = &cmd->iov_data[0];
2387
2388 padding = ((-length) & 3);
2389 if (padding != 0) {
2390 iov[iov_count].iov_base = cmd->pad_bytes;
2391 iov[iov_count++].iov_len = padding;
2392 rx_size += padding;
2393 }
2394
2395 if (conn->conn_ops->DataDigest) {
2396 iov[iov_count].iov_base = &checksum;
2397 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2398 rx_size += ISCSI_CRC_LEN;
2399 }
2400
2401 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2402
2403 iscsit_unmap_iovec(cmd);
2404
2405 if (rx_got != rx_size) {
2406 iscsit_rx_thread_wait_for_tcp(conn);
2407 return IMMEDIATE_DATA_CANNOT_RECOVER;
2408 }
2409
2410 if (conn->conn_ops->DataDigest) {
2411 u32 data_crc;
2412
2413 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
2414 cmd->write_data_done, length, padding,
2415 cmd->pad_bytes);
2416
2417 if (checksum != data_crc) {
2418 pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2419 " does not match computed 0x%08x\n", checksum,
2420 data_crc);
2421
2422 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2423 pr_err("Unable to recover from"
2424 " Immediate Data digest failure while"
2425 " in ERL=0.\n");
2426 iscsit_reject_cmd(cmd,
2427 ISCSI_REASON_DATA_DIGEST_ERROR,
2428 (unsigned char *)hdr);
2429 return IMMEDIATE_DATA_CANNOT_RECOVER;
2430 } else {
2431 iscsit_reject_cmd(cmd,
2432 ISCSI_REASON_DATA_DIGEST_ERROR,
2433 (unsigned char *)hdr);
2434 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2435 }
2436 } else {
2437 pr_debug("Got CRC32C DataDigest 0x%08x for"
2438 " %u bytes of Immediate Data\n", checksum,
2439 length);
2440 }
2441 }
2442
2443 cmd->write_data_done += length;
2444
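/*
 * If the immediate data completed the expected WRITE payload, mark the
 * command as having received its final Data-Out so no further Data-Out
 * PDUs or R2Ts are expected.
 */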
2445 if (cmd->write_data_done == cmd->se_cmd.data_length) {
2446 spin_lock_bh(&cmd->istate_lock);
2447 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2448 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2449 spin_unlock_bh(&cmd->istate_lock);
2450 }
2451
2452 return IMMEDIATE_DATA_NORMAL_OPERATION;
2453 }
2454
2455 /*
2456 * Called with sess->conn_lock held.
2457 */
2458 /* #warning iscsit_build_conn_drop_async_message() only sends out on connections
2459 with an active network interface */
2460 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2461 {
2462 struct iscsi_cmd *cmd;
2463 struct iscsi_conn *conn_p;
2464
2465 /*
2466 * Only send an Asynchronous Message on connections whose network
2467 * interface is still functional.
2468 */
2469 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2470 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2471 iscsit_inc_conn_usage_count(conn_p);
2472 break;
2473 }
2474 }
2475
2476 if (!conn_p)
2477 return;
2478
2479 cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
2480 if (!cmd) {
2481 iscsit_dec_conn_usage_count(conn_p);
2482 return;
2483 }
2484
2485 cmd->logout_cid = conn->cid;
2486 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2487 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2488
2489 spin_lock_bh(&conn_p->cmd_lock);
2490 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2491 spin_unlock_bh(&conn_p->cmd_lock);
2492
2493 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2494 iscsit_dec_conn_usage_count(conn_p);
2495 }
2496
2497 static int iscsit_send_conn_drop_async_message(
2498 struct iscsi_cmd *cmd,
2499 struct iscsi_conn *conn)
2500 {
2501 struct iscsi_async *hdr;
2502
2503 cmd->tx_size = ISCSI_HDR_LEN;
2504 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2505
2506 hdr = (struct iscsi_async *) cmd->pdu;
2507 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2508 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2509 cmd->init_task_tag = RESERVED_ITT;
2510 cmd->targ_xfer_tag = 0xFFFFFFFF;
2511 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2512 cmd->stat_sn = conn->stat_sn++;
2513 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2514 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2515 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2516 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
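/*
 * For ISCSI_ASYNC_MSG_DROPPING_CONNECTION, Parameter1 carries the CID
 * being dropped, while Parameter2/Parameter3 advertise DefaultTime2Wait
 * and DefaultTime2Retain for connection reinstatement (RFC 3720).
 */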
2517 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2518 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2519 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2520
2521 if (conn->conn_ops->HeaderDigest) {
2522 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2523
2524 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2525 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2526
2527 cmd->tx_size += ISCSI_CRC_LEN;
2528 pr_debug("Attaching CRC32C HeaderDigest to"
2529 " Async Message 0x%08x\n", *header_digest);
2530 }
2531
2532 cmd->iov_misc[0].iov_base = cmd->pdu;
2533 cmd->iov_misc[0].iov_len = cmd->tx_size;
2534 cmd->iov_misc_count = 1;
2535
2536 pr_debug("Sending Connection Dropped Async Message StatSN:"
2537 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2538 cmd->logout_cid, conn->cid);
2539 return 0;
2540 }
2541
2542 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
2543 {
2544 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2545 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2546 wait_for_completion_interruptible_timeout(
2547 &conn->tx_half_close_comp,
2548 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2549 }
2550 }
2551
2552 static void
2553 iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2554 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2555 bool set_statsn)
2556 {
2557 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2558 hdr->flags = datain->flags;
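/*
 * When this DataIN also carries status (the S bit is set), piggy-back
 * the residual overflow/underflow information that would otherwise be
 * reported in a separate SCSI Response PDU.
 */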
2559 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2560 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2561 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2562 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2563 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2564 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2565 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2566 }
2567 }
2568 hton24(hdr->dlength, datain->length);
2569 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2570 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2571 (struct scsi_lun *)&hdr->lun);
2572 else
2573 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2574
2575 hdr->itt = cmd->init_task_tag;
2576
2577 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2578 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2579 else
2580 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2581 if (set_statsn)
2582 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2583 else
2584 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2585
2586 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2587 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2588 hdr->datasn = cpu_to_be32(datain->data_sn);
2589 hdr->offset = cpu_to_be32(datain->offset);
2590
2591 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2592 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2593 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2594 ntohl(hdr->offset), datain->length, conn->cid);
2595 }
2596
2597 static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2598 {
2599 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2600 struct iscsi_datain datain;
2601 struct iscsi_datain_req *dr;
2602 struct kvec *iov;
2603 u32 iov_count = 0, tx_size = 0;
2604 int eodr = 0, ret, iov_ret;
2605 bool set_statsn = false;
2606
2607 memset(&datain, 0, sizeof(struct iscsi_datain));
2608 dr = iscsit_get_datain_values(cmd, &datain);
2609 if (!dr) {
2610 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2611 cmd->init_task_tag);
2612 return -1;
2613 }
2614 /*
2615 * Be paranoid and double check the logic for now.
2616 */
2617 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2618 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2619 " datain.length: %u exceeds cmd->data_length: %u\n",
2620 cmd->init_task_tag, datain.offset, datain.length,
2621 cmd->se_cmd.data_length);
2622 return -1;
2623 }
2624
2625 atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2626 /*
2627 * Special case for successful execution w/ both DATAIN
2628 * and Sense Data.
2629 */
2630 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2631 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2632 datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2633 else {
2634 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2635 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2636 iscsit_increment_maxcmdsn(cmd, conn->sess);
2637 cmd->stat_sn = conn->stat_sn++;
2638 set_statsn = true;
2639 } else if (dr->dr_complete ==
2640 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2641 set_statsn = true;
2642 }
2643
2644 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2645
2646 iov = &cmd->iov_data[0];
2647 iov[iov_count].iov_base = cmd->pdu;
2648 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
2649 tx_size += ISCSI_HDR_LEN;
2650
2651 if (conn->conn_ops->HeaderDigest) {
2652 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2653
2654 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
2655 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2656
2657 iov[0].iov_len += ISCSI_CRC_LEN;
2658 tx_size += ISCSI_CRC_LEN;
2659
2660 pr_debug("Attaching CRC32 HeaderDigest"
2661 " for DataIN PDU 0x%08x\n", *header_digest);
2662 }
2663
2664 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
2665 datain.offset, datain.length);
2666 if (iov_ret < 0)
2667 return -1;
2668
2669 iov_count += iov_ret;
2670 tx_size += datain.length;
2671
2672 cmd->padding = ((-datain.length) & 3);
2673 if (cmd->padding) {
2674 iov[iov_count].iov_base = cmd->pad_bytes;
2675 iov[iov_count++].iov_len = cmd->padding;
2676 tx_size += cmd->padding;
2677
2678 pr_debug("Attaching %u padding bytes\n",
2679 cmd->padding);
2680 }
2681 if (conn->conn_ops->DataDigest) {
2682 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
2683 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
2684
2685 iov[iov_count].iov_base = &cmd->data_crc;
2686 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2687 tx_size += ISCSI_CRC_LEN;
2688
2689 pr_debug("Attached CRC32C DataDigest %d bytes, crc"
2690 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
2691 }
2692
2693 cmd->iov_data_count = iov_count;
2694 cmd->tx_size = tx_size;
2695
2696 /* sendpage is preferred but can't insert markers */
2697 if (!conn->conn_ops->IFMarker)
2698 ret = iscsit_fe_sendpage_sg(cmd, conn);
2699 else
2700 ret = iscsit_send_tx_data(cmd, conn, 0);
2701
2702 iscsit_unmap_iovec(cmd);
2703
2704 if (ret < 0) {
2705 iscsit_tx_thread_wait_for_tcp(conn);
2706 return ret;
2707 }
2708
2709 if (dr->dr_complete) {
2710 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2711 2 : 1;
2712 iscsit_free_datain_req(cmd, dr);
2713 }
2714
2715 return eodr;
2716 }
2717
2718 int
2719 iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2720 struct iscsi_logout_rsp *hdr)
2721 {
2722 struct iscsi_conn *logout_conn = NULL;
2723 struct iscsi_conn_recovery *cr = NULL;
2724 struct iscsi_session *sess = conn->sess;
2725 /*
2726 * The actual shutting down of Sessions and/or Connections
2727 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2728 * is done in scsi_logout_post_handler().
2729 */
2730 switch (cmd->logout_reason) {
2731 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2732 pr_debug("iSCSI session logout successful, setting"
2733 " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2734 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2735 break;
2736 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2737 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2738 break;
2739 /*
2740 * For CLOSECONNECTION logout requests carrying
2741 * a matching logout CID -> local CID, the reference
2742 * for the local CID will have been incremented in
2743 * iscsit_logout_closeconnection().
2744 *
2745 * For CLOSECONNECTION logout requests carrying
2746 * a different CID than the connection it arrived
2747 * on, the connection responding to cmd->logout_cid
2748 * is stopped in iscsit_logout_post_handler_diffcid().
2749 */
2750
2751 pr_debug("iSCSI CID: %hu logout on CID: %hu"
2752 " successful.\n", cmd->logout_cid, conn->cid);
2753 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2754 break;
2755 case ISCSI_LOGOUT_REASON_RECOVERY:
2756 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2757 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2758 break;
2759 /*
2760 * If the connection is still active from our point of view
2761 * force connection recovery to occur.
2762 */
2763 logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2764 cmd->logout_cid);
2765 if (logout_conn) {
2766 iscsit_connection_reinstatement_rcfr(logout_conn);
2767 iscsit_dec_conn_usage_count(logout_conn);
2768 }
2769
2770 cr = iscsit_get_inactive_connection_recovery_entry(
2771 conn->sess, cmd->logout_cid);
2772 if (!cr) {
2773 pr_err("Unable to locate CID: %hu for"
2774 " REMOVECONNFORRECOVERY Logout Request.\n",
2775 cmd->logout_cid);
2776 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2777 break;
2778 }
2779
2780 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2781
2782 pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2783 " for recovery for CID: %hu on CID: %hu successful.\n",
2784 cmd->logout_cid, conn->cid);
2785 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2786 break;
2787 default:
2788 pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2789 cmd->logout_reason);
2790 return -1;
2791 }
2792
2793 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2794 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2795 hdr->response = cmd->logout_response;
2796 hdr->itt = cmd->init_task_tag;
2797 cmd->stat_sn = conn->stat_sn++;
2798 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2799
2800 iscsit_increment_maxcmdsn(cmd, conn->sess);
2801 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2802 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2803
2804 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
2805 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2806 cmd->init_task_tag, cmd->stat_sn, hdr->response,
2807 cmd->logout_cid, conn->cid);
2808
2809 return 0;
2810 }
2811 EXPORT_SYMBOL(iscsit_build_logout_rsp);
2812
2813 static int
2814 iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2815 {
2816 struct kvec *iov;
2817 int niov = 0, tx_size, rc;
2818
2819 rc = iscsit_build_logout_rsp(cmd, conn,
2820 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
2821 if (rc < 0)
2822 return rc;
2823
2824 tx_size = ISCSI_HDR_LEN;
2825 iov = &cmd->iov_misc[0];
2826 iov[niov].iov_base = cmd->pdu;
2827 iov[niov++].iov_len = ISCSI_HDR_LEN;
2828
2829 if (conn->conn_ops->HeaderDigest) {
2830 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2831
2832 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
2833 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2834
2835 iov[0].iov_len += ISCSI_CRC_LEN;
2836 tx_size += ISCSI_CRC_LEN;
2837 pr_debug("Attaching CRC32C HeaderDigest to"
2838 " Logout Response 0x%08x\n", *header_digest);
2839 }
2840 cmd->iov_misc_count = niov;
2841 cmd->tx_size = tx_size;
2842
2843 return 0;
2844 }
2845
2846 void
2847 iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2848 struct iscsi_nopin *hdr, bool nopout_response)
2849 {
2850 hdr->opcode = ISCSI_OP_NOOP_IN;
2851 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2852 hton24(hdr->dlength, cmd->buf_ptr_size);
2853 if (nopout_response)
2854 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2855 hdr->itt = cmd->init_task_tag;
2856 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
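/*
 * Only a solicited NOPIN (a response to an initiator NOPOUT) consumes a
 * StatSN; an unsolicited NOPIN advertises the current value without
 * advancing it.
 */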
2857 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
2858 conn->stat_sn;
2859 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2860
2861 if (nopout_response)
2862 iscsit_increment_maxcmdsn(cmd, conn->sess);
2863
2864 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2865 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2866
2867 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
2868 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
2869 "Solicitied" : "Unsolicitied", cmd->init_task_tag,
2870 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2871 }
2872 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
2873
2874 /*
2875 * Unsolicited NOPIN, either requesting a response or not.
2876 */
2877 static int iscsit_send_unsolicited_nopin(
2878 struct iscsi_cmd *cmd,
2879 struct iscsi_conn *conn,
2880 int want_response)
2881 {
2882 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2883 int tx_size = ISCSI_HDR_LEN, ret;
2884
2885 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
2886
2887 if (conn->conn_ops->HeaderDigest) {
2888 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2889
2890 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2891 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2892
2893 tx_size += ISCSI_CRC_LEN;
2894 pr_debug("Attaching CRC32C HeaderDigest to"
2895 " NopIN 0x%08x\n", *header_digest);
2896 }
2897
2898 cmd->iov_misc[0].iov_base = cmd->pdu;
2899 cmd->iov_misc[0].iov_len = tx_size;
2900 cmd->iov_misc_count = 1;
2901 cmd->tx_size = tx_size;
2902
2903 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
2904 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
2905
2906 ret = iscsit_send_tx_data(cmd, conn, 1);
2907 if (ret < 0) {
2908 iscsit_tx_thread_wait_for_tcp(conn);
2909 return ret;
2910 }
2911
2912 spin_lock_bh(&cmd->istate_lock);
2913 cmd->i_state = want_response ?
2914 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
2915 spin_unlock_bh(&cmd->istate_lock);
2916
2917 return 0;
2918 }
2919
2920 static int
2921 iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2922 {
2923 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2924 struct kvec *iov;
2925 u32 padding = 0;
2926 int niov = 0, tx_size;
2927
2928 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
2929
2930 tx_size = ISCSI_HDR_LEN;
2931 iov = &cmd->iov_misc[0];
2932 iov[niov].iov_base = cmd->pdu;
2933 iov[niov++].iov_len = ISCSI_HDR_LEN;
2934
2935 if (conn->conn_ops->HeaderDigest) {
2936 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2937
2938 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2939 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2940
2941 iov[0].iov_len += ISCSI_CRC_LEN;
2942 tx_size += ISCSI_CRC_LEN;
2943 pr_debug("Attaching CRC32C HeaderDigest"
2944 " to NopIn 0x%08x\n", *header_digest);
2945 }
2946
2947 /*
2948 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
2949 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
2950 */
2951 if (cmd->buf_ptr_size) {
2952 iov[niov].iov_base = cmd->buf_ptr;
2953 iov[niov++].iov_len = cmd->buf_ptr_size;
2954 tx_size += cmd->buf_ptr_size;
2955
2956 pr_debug("Echoing back %u bytes of ping"
2957 " data.\n", cmd->buf_ptr_size);
2958
2959 padding = ((-cmd->buf_ptr_size) & 3);
2960 if (padding != 0) {
2961 iov[niov].iov_base = &cmd->pad_bytes;
2962 iov[niov++].iov_len = padding;
2963 tx_size += padding;
2964 pr_debug("Attaching %u additional"
2965 " padding bytes.\n", padding);
2966 }
2967 if (conn->conn_ops->DataDigest) {
2968 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2969 cmd->buf_ptr, cmd->buf_ptr_size,
2970 padding, (u8 *)&cmd->pad_bytes,
2971 (u8 *)&cmd->data_crc);
2972
2973 iov[niov].iov_base = &cmd->data_crc;
2974 iov[niov++].iov_len = ISCSI_CRC_LEN;
2975 tx_size += ISCSI_CRC_LEN;
2976 pr_debug("Attached DataDigest for %u"
2977 " bytes of ping data, CRC 0x%08x\n",
2978 cmd->buf_ptr_size, cmd->data_crc);
2979 }
2980 }
2981
2982 cmd->iov_misc_count = niov;
2983 cmd->tx_size = tx_size;
2984
2985 return 0;
2986 }
2987
2988 static int iscsit_send_r2t(
2989 struct iscsi_cmd *cmd,
2990 struct iscsi_conn *conn)
2991 {
2992 int tx_size = 0;
2993 struct iscsi_r2t *r2t;
2994 struct iscsi_r2t_rsp *hdr;
2995 int ret;
2996
2997 r2t = iscsit_get_r2t_from_list(cmd);
2998 if (!r2t)
2999 return -1;
3000
3001 hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
3002 memset(hdr, 0, ISCSI_HDR_LEN);
3003 hdr->opcode = ISCSI_OP_R2T;
3004 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3005 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3006 (struct scsi_lun *)&hdr->lun);
3007 hdr->itt = cmd->init_task_tag;
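/*
 * 0xFFFFFFFF is the reserved TTT value, so skip it if the per-session
 * target transfer tag counter happens to wrap onto it.
 */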
3008 spin_lock_bh(&conn->sess->ttt_lock);
3009 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
3010 if (r2t->targ_xfer_tag == 0xFFFFFFFF)
3011 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
3012 spin_unlock_bh(&conn->sess->ttt_lock);
3013 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3014 hdr->statsn = cpu_to_be32(conn->stat_sn);
3015 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3016 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3017 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
3018 hdr->data_offset = cpu_to_be32(r2t->offset);
3019 hdr->data_length = cpu_to_be32(r2t->xfer_len);
3020
3021 cmd->iov_misc[0].iov_base = cmd->pdu;
3022 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3023 tx_size += ISCSI_HDR_LEN;
3024
3025 if (conn->conn_ops->HeaderDigest) {
3026 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3027
3028 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3029 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3030
3031 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3032 tx_size += ISCSI_CRC_LEN;
3033 pr_debug("Attaching CRC32 HeaderDigest for R2T"
3034 " PDU 0x%08x\n", *header_digest);
3035 }
3036
3037 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3038 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3039 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3040 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3041 r2t->offset, r2t->xfer_len, conn->cid);
3042
3043 cmd->iov_misc_count = 1;
3044 cmd->tx_size = tx_size;
3045
3046 spin_lock_bh(&cmd->r2t_lock);
3047 r2t->sent_r2t = 1;
3048 spin_unlock_bh(&cmd->r2t_lock);
3049
3050 ret = iscsit_send_tx_data(cmd, conn, 1);
3051 if (ret < 0) {
3052 iscsit_tx_thread_wait_for_tcp(conn);
3053 return ret;
3054 }
3055
3056 spin_lock_bh(&cmd->dataout_timeout_lock);
3057 iscsit_start_dataout_timer(cmd, conn);
3058 spin_unlock_bh(&cmd->dataout_timeout_lock);
3059
3060 return 0;
3061 }
3062
3063 /*
3064 * @recovery: If called from iscsi_task_reassign_complete_write() for
3065 * connection recovery.
3066 */
3067 int iscsit_build_r2ts_for_cmd(
3068 struct iscsi_conn *conn,
3069 struct iscsi_cmd *cmd,
3070 bool recovery)
3071 {
3072 int first_r2t = 1;
3073 u32 offset = 0, xfer_len = 0;
3074
3075 spin_lock_bh(&cmd->r2t_lock);
3076 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3077 spin_unlock_bh(&cmd->r2t_lock);
3078 return 0;
3079 }
3080
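/*
 * For in-order data sequences (outside of recovery), never re-request
 * payload that has already arrived as immediate or unsolicited data.
 */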
3081 if (conn->sess->sess_ops->DataSequenceInOrder &&
3082 !recovery)
3083 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3084
3085 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3086 if (conn->sess->sess_ops->DataSequenceInOrder) {
3087 offset = cmd->r2t_offset;
3088
3089 if (first_r2t && recovery) {
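/*
 * During connection recovery the first rebuilt R2T only asks for the
 * remainder of the burst that was in flight, i.e. MaxBurstLength minus
 * the data already solicited in next_burst_len.
 */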
3090 int new_data_end = offset +
3091 conn->sess->sess_ops->MaxBurstLength -
3092 cmd->next_burst_len;
3093
3094 if (new_data_end > cmd->se_cmd.data_length)
3095 xfer_len = cmd->se_cmd.data_length - offset;
3096 else
3097 xfer_len =
3098 conn->sess->sess_ops->MaxBurstLength -
3099 cmd->next_burst_len;
3100 } else {
3101 int new_data_end = offset +
3102 conn->sess->sess_ops->MaxBurstLength;
3103
3104 if (new_data_end > cmd->se_cmd.data_length)
3105 xfer_len = cmd->se_cmd.data_length - offset;
3106 else
3107 xfer_len = conn->sess->sess_ops->MaxBurstLength;
3108 }
3109 cmd->r2t_offset += xfer_len;
3110
3111 if (cmd->r2t_offset == cmd->se_cmd.data_length)
3112 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3113 } else {
3114 struct iscsi_seq *seq;
3115
3116 seq = iscsit_get_seq_holder_for_r2t(cmd);
3117 if (!seq) {
3118 spin_unlock_bh(&cmd->r2t_lock);
3119 return -1;
3120 }
3121
3122 offset = seq->offset;
3123 xfer_len = seq->xfer_len;
3124
3125 if (cmd->seq_send_order == cmd->seq_count)
3126 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3127 }
3128 cmd->outstanding_r2ts++;
3129 first_r2t = 0;
3130
3131 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3132 spin_unlock_bh(&cmd->r2t_lock);
3133 return -1;
3134 }
3135
3136 if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3137 break;
3138 }
3139 spin_unlock_bh(&cmd->r2t_lock);
3140
3141 return 0;
3142 }
3143
3144 void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3145 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3146 {
3147 if (inc_stat_sn)
3148 cmd->stat_sn = conn->stat_sn++;
3149
3150 atomic_long_inc(&conn->sess->rsp_pdus);
3151
3152 memset(hdr, 0, ISCSI_HDR_LEN);
3153 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3154 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3155 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3156 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3157 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3158 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3159 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3160 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3161 }
3162 hdr->response = cmd->iscsi_response;
3163 hdr->cmd_status = cmd->se_cmd.scsi_status;
3164 hdr->itt = cmd->init_task_tag;
3165 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3166
3167 iscsit_increment_maxcmdsn(cmd, conn->sess);
3168 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3169 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3170
3171 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3172 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3173 cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
3174 cmd->se_cmd.scsi_status, conn->cid);
3175 }
3176 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3177
3178 static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3179 {
3180 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3181 struct kvec *iov;
3182 u32 padding = 0, tx_size = 0;
3183 int iov_count = 0;
3184 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3185
3186 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3187
3188 iov = &cmd->iov_misc[0];
3189 iov[iov_count].iov_base = cmd->pdu;
3190 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3191 tx_size += ISCSI_HDR_LEN;
3192
3193 /*
3194 * Attach SENSE DATA payload to iSCSI Response PDU
3195 */
3196 if (cmd->se_cmd.sense_buffer &&
3197 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3198 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
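/*
 * iSCSI carries SCSI sense data preceded by a two-byte SenseLength
 * field (RFC 3720), so prepend it here and account for it in
 * scsi_sense_length before computing padding.
 */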
3199 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3200 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3201
3202 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3203 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3204 iov[iov_count].iov_base = cmd->sense_buffer;
3205 iov[iov_count++].iov_len =
3206 (cmd->se_cmd.scsi_sense_length + padding);
3207 tx_size += cmd->se_cmd.scsi_sense_length;
3208
3209 if (padding) {
3210 memset(cmd->sense_buffer +
3211 cmd->se_cmd.scsi_sense_length, 0, padding);
3212 tx_size += padding;
3213 pr_debug("Adding %u bytes of padding to"
3214 " SENSE.\n", padding);
3215 }
3216
3217 if (conn->conn_ops->DataDigest) {
3218 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3219 cmd->sense_buffer,
3220 (cmd->se_cmd.scsi_sense_length + padding),
3221 0, NULL, (u8 *)&cmd->data_crc);
3222
3223 iov[iov_count].iov_base = &cmd->data_crc;
3224 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3225 tx_size += ISCSI_CRC_LEN;
3226
3227 pr_debug("Attaching CRC32 DataDigest for"
3228 " SENSE, %u bytes CRC 0x%08x\n",
3229 (cmd->se_cmd.scsi_sense_length + padding),
3230 cmd->data_crc);
3231 }
3232
3233 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3234 " Response PDU\n",
3235 cmd->se_cmd.scsi_sense_length);
3236 }
3237
3238 if (conn->conn_ops->HeaderDigest) {
3239 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3240
3241 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
3242 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3243
3244 iov[0].iov_len += ISCSI_CRC_LEN;
3245 tx_size += ISCSI_CRC_LEN;
3246 pr_debug("Attaching CRC32 HeaderDigest for Response"
3247 " PDU 0x%08x\n", *header_digest);
3248 }
3249
3250 cmd->iov_misc_count = iov_count;
3251 cmd->tx_size = tx_size;
3252
3253 return 0;
3254 }
3255
3256 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3257 {
3258 switch (se_tmr->response) {
3259 case TMR_FUNCTION_COMPLETE:
3260 return ISCSI_TMF_RSP_COMPLETE;
3261 case TMR_TASK_DOES_NOT_EXIST:
3262 return ISCSI_TMF_RSP_NO_TASK;
3263 case TMR_LUN_DOES_NOT_EXIST:
3264 return ISCSI_TMF_RSP_NO_LUN;
3265 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3266 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3267 case TMR_FUNCTION_REJECTED:
3268 default:
3269 return ISCSI_TMF_RSP_REJECTED;
3270 }
3271 }
3272
3273 void
3274 iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3275 struct iscsi_tm_rsp *hdr)
3276 {
3277 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3278
3279 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3280 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3281 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3282 hdr->itt = cmd->init_task_tag;
3283 cmd->stat_sn = conn->stat_sn++;
3284 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3285
3286 iscsit_increment_maxcmdsn(cmd, conn->sess);
3287 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3288 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3289
3290 pr_debug("Built Task Management Response ITT: 0x%08x,"
3291 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3292 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3293 }
3294 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3295
3296 static int
3297 iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3298 {
3299 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3300 u32 tx_size = 0;
3301
3302 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3303
3304 cmd->iov_misc[0].iov_base = cmd->pdu;
3305 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3306 tx_size += ISCSI_HDR_LEN;
3307
3308 if (conn->conn_ops->HeaderDigest) {
3309 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3310
3311 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3312 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3313
3314 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3315 tx_size += ISCSI_CRC_LEN;
3316 pr_debug("Attaching CRC32 HeaderDigest for Task"
3317 " Mgmt Response PDU 0x%08x\n", *header_digest);
3318 }
3319
3320 cmd->iov_misc_count = 1;
3321 cmd->tx_size = tx_size;
3322
3323 return 0;
3324 }
3325
3326 static bool iscsit_check_inaddr_any(struct iscsi_np *np)
3327 {
3328 bool ret = false;
3329
3330 if (np->np_sockaddr.ss_family == AF_INET6) {
3331 const struct sockaddr_in6 sin6 = {
3332 .sin6_addr = IN6ADDR_ANY_INIT };
3333 struct sockaddr_in6 *sock_in6 =
3334 (struct sockaddr_in6 *)&np->np_sockaddr;
3335
3336 if (!memcmp(sock_in6->sin6_addr.s6_addr,
3337 sin6.sin6_addr.s6_addr, 16))
3338 ret = true;
3339 } else {
3340 struct sockaddr_in *sock_in =
3341 (struct sockaddr_in *)&np->np_sockaddr;
3342
3343 if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
3344 ret = true;
3345 }
3346
3347 return ret;
3348 }
3349
3350 #define SENDTARGETS_BUF_LIMIT 32768U
3351
3352 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3353 {
3354 char *payload = NULL;
3355 struct iscsi_conn *conn = cmd->conn;
3356 struct iscsi_portal_group *tpg;
3357 struct iscsi_tiqn *tiqn;
3358 struct iscsi_tpg_np *tpg_np;
3359 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3360 int target_name_printed;
3361 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3362 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3363
3364 buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
3365 SENDTARGETS_BUF_LIMIT);
3366
3367 payload = kzalloc(buffer_len, GFP_KERNEL);
3368 if (!payload) {
3369 pr_err("Unable to allocate memory for sendtargets"
3370 " response.\n");
3371 return -ENOMEM;
3372 }
3373 /*
3374 * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
3375 * explicit case..
3376 */
3377 if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
3378 text_ptr = strchr(text_in, '=');
3379 if (!text_ptr) {
3380 pr_err("Unable to locate '=' string in text_in:"
3381 " %s\n", text_in);
3382 kfree(payload);
3383 return -EINVAL;
3384 }
3385 /*
3386 * Skip over '=' character..
3387 */
3388 text_ptr += 1;
3389 }
3390
3391 spin_lock(&tiqn_lock);
3392 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3393 if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
3394 strcmp(tiqn->tiqn, text_ptr)) {
3395 continue;
3396 }
3397
3398 target_name_printed = 0;
3399
3400 spin_lock(&tiqn->tiqn_tpg_lock);
3401 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3402
3403 /* If demo_mode_discovery=0 and generate_node_acls=0
3404 * (demo mode disabled) do not return
3405 * TargetName+TargetAddress unless a NodeACL exists.
3406 */
3407
3408 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3409 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3410 (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
3411 cmd->conn->sess->sess_ops->InitiatorName))) {
3412 continue;
3413 }
3414
3415 spin_lock(&tpg->tpg_state_lock);
3416 if ((tpg->tpg_state == TPG_STATE_FREE) ||
3417 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3418 spin_unlock(&tpg->tpg_state_lock);
3419 continue;
3420 }
3421 spin_unlock(&tpg->tpg_state_lock);
3422
3423 spin_lock(&tpg->tpg_np_lock);
3424 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3425 tpg_np_list) {
3426 struct iscsi_np *np = tpg_np->tpg_np;
3427 bool inaddr_any = iscsit_check_inaddr_any(np);
3428
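/*
 * The SendTargets payload is a series of NUL-separated key=value pairs;
 * the "len += 1" below accounts for the terminating NUL of each
 * TargetName/TargetAddress entry.
 */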
3429 if (!target_name_printed) {
3430 len = sprintf(buf, "TargetName=%s",
3431 tiqn->tiqn);
3432 len += 1;
3433
3434 if ((len + payload_len) > buffer_len) {
3435 spin_unlock(&tpg->tpg_np_lock);
3436 spin_unlock(&tiqn->tiqn_tpg_lock);
3437 end_of_buf = 1;
3438 goto eob;
3439 }
3440 memcpy(payload + payload_len, buf, len);
3441 payload_len += len;
3442 target_name_printed = 1;
3443 }
3444
3445 len = sprintf(buf, "TargetAddress="
3446 "%s:%hu,%hu",
3447 (inaddr_any == false) ?
3448 np->np_ip : conn->local_ip,
3449 (inaddr_any == false) ?
3450 np->np_port : conn->local_port,
3451 tpg->tpgt);
3452 len += 1;
3453
3454 if ((len + payload_len) > buffer_len) {
3455 spin_unlock(&tpg->tpg_np_lock);
3456 spin_unlock(&tiqn->tiqn_tpg_lock);
3457 end_of_buf = 1;
3458 goto eob;
3459 }
3460 memcpy(payload + payload_len, buf, len);
3461 payload_len += len;
3462 }
3463 spin_unlock(&tpg->tpg_np_lock);
3464 }
3465 spin_unlock(&tiqn->tiqn_tpg_lock);
3466 eob:
3467 if (end_of_buf)
3468 break;
3469
3470 if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
3471 break;
3472 }
3473 spin_unlock(&tiqn_lock);
3474
3475 cmd->buf_ptr = payload;
3476
3477 return payload_len;
3478 }
3479
3480 int
3481 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3482 struct iscsi_text_rsp *hdr)
3483 {
3484 int text_length, padding;
3485
3486 text_length = iscsit_build_sendtargets_response(cmd);
3487 if (text_length < 0)
3488 return text_length;
3489
3490 hdr->opcode = ISCSI_OP_TEXT_RSP;
3491 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3492 padding = ((-text_length) & 3);
3493 hton24(hdr->dlength, text_length);
3494 hdr->itt = cmd->init_task_tag;
3495 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3496 cmd->stat_sn = conn->stat_sn++;
3497 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3498
3499 iscsit_increment_maxcmdsn(cmd, conn->sess);
3500 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3501 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3502
3503 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
3504 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
3505 text_length, conn->cid);
3506
3507 return text_length + padding;
3508 }
3509 EXPORT_SYMBOL(iscsit_build_text_rsp);
3510
3511 /*
3512 * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3513 * MaxRecvDataSegmentLength.
3514 */
3515 static int iscsit_send_text_rsp(
3516 struct iscsi_cmd *cmd,
3517 struct iscsi_conn *conn)
3518 {
3519 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3520 struct kvec *iov;
3521 u32 tx_size = 0;
3522 int text_length, iov_count = 0, rc;
3523
3524 rc = iscsit_build_text_rsp(cmd, conn, hdr);
3525 if (rc < 0)
3526 return rc;
3527
3528 text_length = rc;
3529 iov = &cmd->iov_misc[0];
3530 iov[iov_count].iov_base = cmd->pdu;
3531 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3532 iov[iov_count].iov_base = cmd->buf_ptr;
3533 iov[iov_count++].iov_len = text_length;
3534
3535 tx_size += (ISCSI_HDR_LEN + text_length);
3536
3537 if (conn->conn_ops->HeaderDigest) {
3538 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3539
3540 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3541 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3542
3543 iov[0].iov_len += ISCSI_CRC_LEN;
3544 tx_size += ISCSI_CRC_LEN;
3545 pr_debug("Attaching CRC32 HeaderDigest for"
3546 " Text Response PDU 0x%08x\n", *header_digest);
3547 }
3548
3549 if (conn->conn_ops->DataDigest) {
3550 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3551 cmd->buf_ptr, text_length,
3552 0, NULL, (u8 *)&cmd->data_crc);
3553
3554 iov[iov_count].iov_base = &cmd->data_crc;
3555 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3556 tx_size += ISCSI_CRC_LEN;
3557
3558 pr_debug("Attaching DataDigest for %u bytes of text"
3559 " data, CRC 0x%08x\n", text_length,
3560 cmd->data_crc);
3561 }
3562
3563 cmd->iov_misc_count = iov_count;
3564 cmd->tx_size = tx_size;
3565
3566 return 0;
3567 }
3568
3569 void
3570 iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3571 struct iscsi_reject *hdr)
3572 {
3573 hdr->opcode = ISCSI_OP_REJECT;
3574 hdr->reason = cmd->reject_reason;
3575 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3576 hton24(hdr->dlength, ISCSI_HDR_LEN);
3577 hdr->ffffffff = cpu_to_be32(0xffffffff);
3578 cmd->stat_sn = conn->stat_sn++;
3579 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3580 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3581 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3582
3583 }
3584 EXPORT_SYMBOL(iscsit_build_reject);
3585
3586 static int iscsit_send_reject(
3587 struct iscsi_cmd *cmd,
3588 struct iscsi_conn *conn)
3589 {
3590 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3591 struct kvec *iov;
3592 u32 iov_count = 0, tx_size;
3593
3594 iscsit_build_reject(cmd, conn, hdr);
3595
3596 iov = &cmd->iov_misc[0];
3597 iov[iov_count].iov_base = cmd->pdu;
3598 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3599 iov[iov_count].iov_base = cmd->buf_ptr;
3600 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3601
3602 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
3603
3604 if (conn->conn_ops->HeaderDigest) {
3605 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3606
3607 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3608 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3609
3610 iov[0].iov_len += ISCSI_CRC_LEN;
3611 tx_size += ISCSI_CRC_LEN;
3612 pr_debug("Attaching CRC32 HeaderDigest for"
3613 " REJECT PDU 0x%08x\n", *header_digest);
3614 }
3615
3616 if (conn->conn_ops->DataDigest) {
3617 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
3618 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
3619
3620 iov[iov_count].iov_base = &cmd->data_crc;
3621 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3622 tx_size += ISCSI_CRC_LEN;
3623 pr_debug("Attaching CRC32 DataDigest for REJECT"
3624 " PDU 0x%08x\n", cmd->data_crc);
3625 }
3626
3627 cmd->iov_misc_count = iov_count;
3628 cmd->tx_size = tx_size;
3629
3630 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3631 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3632
3633 return 0;
3634 }
3635
3636 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3637 {
3638 struct iscsi_thread_set *ts = conn->thread_set;
3639 int ord, cpu;
3640 /*
3641 * thread_id is assigned from iscsit_global->ts_bitmap from
3642 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
3643 *
3644 * Here we use thread_id to determine which CPU that this
3645 * iSCSI connection's iscsi_thread_set will be scheduled to
3646 * execute upon.
3647 */
3648 ord = ts->thread_id % cpumask_weight(cpu_online_mask);
3649 for_each_online_cpu(cpu) {
3650 if (ord-- == 0) {
3651 cpumask_set_cpu(cpu, conn->conn_cpumask);
3652 return;
3653 }
3654 }
3655 /*
3656 * This should never be reached..
3657 */
3658 dump_stack();
3659 cpumask_setall(conn->conn_cpumask);
3660 }
3661
3662 static inline void iscsit_thread_check_cpumask(
3663 struct iscsi_conn *conn,
3664 struct task_struct *p,
3665 int mode)
3666 {
3667 char buf[128];
3668 /*
3669 * mode == 1 signals iscsi_target_tx_thread() usage.
3670 * mode == 0 signals iscsi_target_rx_thread() usage.
3671 */
3672 if (mode == 1) {
3673 if (!conn->conn_tx_reset_cpumask)
3674 return;
3675 conn->conn_tx_reset_cpumask = 0;
3676 } else {
3677 if (!conn->conn_rx_reset_cpumask)
3678 return;
3679 conn->conn_rx_reset_cpumask = 0;
3680 }
3681 /*
3682 * Update the CPU mask for this single kthread so that
3683 * both TX and RX kthreads are scheduled to run on the
3684 * same CPU.
3685 */
3686 memset(buf, 0, 128);
3687 cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3688 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3689 }
3690
3691 static int
3692 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3693 {
3694 int ret;
3695
3696 switch (state) {
3697 case ISTATE_SEND_R2T:
3698 ret = iscsit_send_r2t(cmd, conn);
3699 if (ret < 0)
3700 goto err;
3701 break;
3702 case ISTATE_REMOVE:
3703 spin_lock_bh(&conn->cmd_lock);
3704 list_del(&cmd->i_conn_node);
3705 spin_unlock_bh(&conn->cmd_lock);
3706
3707 iscsit_free_cmd(cmd, false);
3708 break;
3709 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3710 iscsit_mod_nopin_response_timer(conn);
3711 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3712 if (ret < 0)
3713 goto err;
3714 break;
3715 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3716 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3717 if (ret < 0)
3718 goto err;
3719 break;
3720 default:
3721 pr_err("Unknown Opcode: 0x%02x ITT:"
3722 " 0x%08x, i_state: %d on CID: %hu\n",
3723 cmd->iscsi_opcode, cmd->init_task_tag, state,
3724 conn->cid);
3725 goto err;
3726 }
3727
3728 return 0;
3729
3730 err:
3731 return -1;
3732 }
3733
3734 static int
3735 iscsit_handle_immediate_queue(struct iscsi_conn *conn)
3736 {
3737 struct iscsit_transport *t = conn->conn_transport;
3738 struct iscsi_queue_req *qr;
3739 struct iscsi_cmd *cmd;
3740 u8 state;
3741 int ret;
3742
3743 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3744 atomic_set(&conn->check_immediate_queue, 0);
3745 cmd = qr->cmd;
3746 state = qr->state;
3747 kmem_cache_free(lio_qr_cache, qr);
3748
3749 ret = t->iscsit_immediate_queue(conn, cmd, state);
3750 if (ret < 0)
3751 return ret;
3752 }
3753
3754 return 0;
3755 }
3756
3757 static int
3758 iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3759 {
3760 int ret;
3761
3762 check_rsp_state:
3763 switch (state) {
3764 case ISTATE_SEND_DATAIN:
3765 ret = iscsit_send_datain(cmd, conn);
3766 if (ret < 0)
3767 goto err;
3768 else if (!ret)
3769 /* more drs */
3770 goto check_rsp_state;
3771 else if (ret == 1) {
3772 /* all done */
3773 spin_lock_bh(&cmd->istate_lock);
3774 cmd->i_state = ISTATE_SENT_STATUS;
3775 spin_unlock_bh(&cmd->istate_lock);
3776
3777 if (atomic_read(&conn->check_immediate_queue))
3778 return 1;
3779
3780 return 0;
3781 } else if (ret == 2) {
3782 /* Still must send status,
3783 SCF_TRANSPORT_TASK_SENSE was set */
3784 spin_lock_bh(&cmd->istate_lock);
3785 cmd->i_state = ISTATE_SEND_STATUS;
3786 spin_unlock_bh(&cmd->istate_lock);
3787 state = ISTATE_SEND_STATUS;
3788 goto check_rsp_state;
3789 }
3790
3791 break;
3792 case ISTATE_SEND_STATUS:
3793 case ISTATE_SEND_STATUS_RECOVERY:
3794 ret = iscsit_send_response(cmd, conn);
3795 break;
3796 case ISTATE_SEND_LOGOUTRSP:
3797 ret = iscsit_send_logout(cmd, conn);
3798 break;
3799 case ISTATE_SEND_ASYNCMSG:
3800 ret = iscsit_send_conn_drop_async_message(
3801 cmd, conn);
3802 break;
3803 case ISTATE_SEND_NOPIN:
3804 ret = iscsit_send_nopin(cmd, conn);
3805 break;
3806 case ISTATE_SEND_REJECT:
3807 ret = iscsit_send_reject(cmd, conn);
3808 break;
3809 case ISTATE_SEND_TASKMGTRSP:
3810 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3811 if (ret != 0)
3812 break;
3813 ret = iscsit_tmr_post_handler(cmd, conn);
3814 if (ret != 0)
3815 iscsit_fall_back_to_erl0(conn->sess);
3816 break;
3817 case ISTATE_SEND_TEXTRSP:
3818 ret = iscsit_send_text_rsp(cmd, conn);
3819 break;
3820 default:
3821 pr_err("Unknown Opcode: 0x%02x ITT:"
3822 " 0x%08x, i_state: %d on CID: %hu\n",
3823 cmd->iscsi_opcode, cmd->init_task_tag,
3824 state, conn->cid);
3825 goto err;
3826 }
3827 if (ret < 0)
3828 goto err;
3829
3830 if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3831 iscsit_tx_thread_wait_for_tcp(conn);
3832 iscsit_unmap_iovec(cmd);
3833 goto err;
3834 }
3835 iscsit_unmap_iovec(cmd);
3836
3837 switch (state) {
3838 case ISTATE_SEND_LOGOUTRSP:
3839 if (!iscsit_logout_post_handler(cmd, conn))
3840 goto restart;
3841 /* fall through */
3842 case ISTATE_SEND_STATUS:
3843 case ISTATE_SEND_ASYNCMSG:
3844 case ISTATE_SEND_NOPIN:
3845 case ISTATE_SEND_STATUS_RECOVERY:
3846 case ISTATE_SEND_TEXTRSP:
3847 case ISTATE_SEND_TASKMGTRSP:
3848 case ISTATE_SEND_REJECT:
3849 spin_lock_bh(&cmd->istate_lock);
3850 cmd->i_state = ISTATE_SENT_STATUS;
3851 spin_unlock_bh(&cmd->istate_lock);
3852 break;
3853 default:
3854 pr_err("Unknown Opcode: 0x%02x ITT:"
3855 " 0x%08x, i_state: %d on CID: %hu\n",
3856 cmd->iscsi_opcode, cmd->init_task_tag,
3857 cmd->i_state, conn->cid);
3858 goto err;
3859 }
3860
3861 if (atomic_read(&conn->check_immediate_queue))
3862 return 1;
3863
3864 return 0;
3865
3866 err:
3867 return -1;
3868 restart:
3869 return -EAGAIN;
3870 }
3871
3872 static int iscsit_handle_response_queue(struct iscsi_conn *conn)
3873 {
3874 struct iscsit_transport *t = conn->conn_transport;
3875 struct iscsi_queue_req *qr;
3876 struct iscsi_cmd *cmd;
3877 u8 state;
3878 int ret;
3879
3880 while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3881 cmd = qr->cmd;
3882 state = qr->state;
3883 kmem_cache_free(lio_qr_cache, qr);
3884
3885 ret = t->iscsit_response_queue(conn, cmd, state);
3886 if (ret == 1 || ret < 0)
3887 return ret;
3888 }
3889
3890 return 0;
3891 }
3892
3893 int iscsi_target_tx_thread(void *arg)
3894 {
3895 int ret = 0;
3896 struct iscsi_conn *conn;
3897 struct iscsi_thread_set *ts = arg;
3898 /*
3899 * Allow ourselves to be interrupted by SIGINT so that a
3900 * connection recovery / failure event can be triggered externally.
3901 */
3902 allow_signal(SIGINT);
3903
3904 restart:
3905 conn = iscsi_tx_thread_pre_handler(ts);
3906 if (!conn)
3907 goto out;
3908
3909 ret = 0;
3910
3911 while (!kthread_should_stop()) {
3912 /*
3913 * Ensure that both TX and RX per connection kthreads
3914 * are scheduled to run on the same CPU.
3915 */
3916 iscsit_thread_check_cpumask(conn, current, 1);
3917
3918 wait_event_interruptible(conn->queues_wq,
3919 !iscsit_conn_all_queues_empty(conn) ||
3920 ts->status == ISCSI_THREAD_SET_RESET);
3921
3922 if ((ts->status == ISCSI_THREAD_SET_RESET) ||
3923 signal_pending(current))
3924 goto transport_err;
3925
3926 get_immediate:
3927 ret = iscsit_handle_immediate_queue(conn);
3928 if (ret < 0)
3929 goto transport_err;
3930
3931 ret = iscsit_handle_response_queue(conn);
3932 if (ret == 1)
3933 goto get_immediate;
3934 else if (ret == -EAGAIN)
3935 goto restart;
3936 else if (ret < 0)
3937 goto transport_err;
3938 }
3939
3940 transport_err:
3941 iscsit_take_action_for_connection_exit(conn);
3942 goto restart;
3943 out:
3944 return 0;
3945 }
3946
3947 static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3948 {
3949 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3950 struct iscsi_cmd *cmd;
3951 int ret = 0;
3952
3953 switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3954 case ISCSI_OP_SCSI_CMD:
3955 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3956 if (!cmd)
3957 goto reject;
3958
3959 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3960 break;
3961 case ISCSI_OP_SCSI_DATA_OUT:
3962 ret = iscsit_handle_data_out(conn, buf);
3963 break;
3964 case ISCSI_OP_NOOP_OUT:
3965 cmd = NULL;
3966 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3967 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3968 if (!cmd)
3969 goto reject;
3970 }
3971 ret = iscsit_handle_nop_out(conn, cmd, buf);
3972 break;
3973 case ISCSI_OP_SCSI_TMFUNC:
3974 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3975 if (!cmd)
3976 goto reject;
3977
3978 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
3979 break;
3980 case ISCSI_OP_TEXT:
3981 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3982 if (!cmd)
3983 goto reject;
3984
3985 ret = iscsit_handle_text_cmd(conn, cmd, buf);
3986 break;
3987 case ISCSI_OP_LOGOUT:
3988 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3989 if (!cmd)
3990 goto reject;
3991
3992 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
3993 if (ret > 0)
3994 wait_for_completion_timeout(&conn->conn_logout_comp,
3995 SECONDS_FOR_LOGOUT_COMP * HZ);
3996 break;
3997 case ISCSI_OP_SNACK:
3998 ret = iscsit_handle_snack(conn, buf);
3999 break;
4000 default:
4001 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4002 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4003 pr_err("Cannot recover from unknown"
4004 " opcode while ERL=0, closing iSCSI connection.\n");
4005 return -1;
4006 }
4007 if (!conn->conn_ops->OFMarker) {
4008 pr_err("Unable to recover from unknown"
4009 " opcode while OFMarker=No, closing iSCSI"
4010 " connection.\n");
4011 return -1;
4012 }
4013 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
4014 pr_err("Unable to recover from unknown"
4015 " opcode, closing iSCSI connection.\n");
4016 return -1;
4017 }
4018 break;
4019 }
4020
4021 return ret;
4022 reject:
4023 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4024 }
4025
4026 int iscsi_target_rx_thread(void *arg)
4027 {
4028 int ret;
4029 u8 buffer[ISCSI_HDR_LEN], opcode;
4030 u32 checksum = 0, digest = 0;
4031 struct iscsi_conn *conn = NULL;
4032 struct iscsi_thread_set *ts = arg;
4033 struct kvec iov;
4034 /*
4035 * Allow ourselves to be interrupted by SIGINT so that a
4036 * connection recovery / failure event can be triggered externally.
4037 */
4038 allow_signal(SIGINT);
4039
4040 restart:
4041 conn = iscsi_rx_thread_pre_handler(ts);
4042 if (!conn)
4043 goto out;
4044
4045 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4046 struct completion comp;
4047 int rc;
4048
4049 init_completion(&comp);
4050 rc = wait_for_completion_interruptible(&comp);
4051 if (rc < 0)
4052 goto transport_err;
4053
4054 goto out;
4055 }
4056
4057 while (!kthread_should_stop()) {
4058 /*
4059 * Ensure that both TX and RX per connection kthreads
4060 * are scheduled to run on the same CPU.
4061 */
4062 iscsit_thread_check_cpumask(conn, current, 0);
4063
4064 memset(buffer, 0, ISCSI_HDR_LEN);
4065 memset(&iov, 0, sizeof(struct kvec));
4066
4067 iov.iov_base = buffer;
4068 iov.iov_len = ISCSI_HDR_LEN;
4069
4070 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4071 if (ret != ISCSI_HDR_LEN) {
4072 iscsit_rx_thread_wait_for_tcp(conn);
4073 goto transport_err;
4074 }
4075
4076 if (conn->conn_ops->HeaderDigest) {
4077 iov.iov_base = &digest;
4078 iov.iov_len = ISCSI_CRC_LEN;
4079
4080 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4081 if (ret != ISCSI_CRC_LEN) {
4082 iscsit_rx_thread_wait_for_tcp(conn);
4083 goto transport_err;
4084 }
4085
4086 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
4087 buffer, ISCSI_HDR_LEN,
4088 0, NULL, (u8 *)&checksum);
4089
4090 if (digest != checksum) {
4091 pr_err("HeaderDigest CRC32C failed,"
4092 " received 0x%08x, computed 0x%08x\n",
4093 digest, checksum);
4094 /*
4095 * Set the PDU to 0xff so it will intentionally
4096 * hit default in the switch below.
4097 */
4098 memset(buffer, 0xff, ISCSI_HDR_LEN);
4099 atomic_long_inc(&conn->sess->conn_digest_errors);
4100 } else {
4101 pr_debug("Got HeaderDigest CRC32C"
4102 " 0x%08x\n", checksum);
4103 }
4104 }
4105
4106 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4107 goto transport_err;
4108
4109 opcode = buffer[0] & ISCSI_OPCODE_MASK;
4110
4111 if (conn->sess->sess_ops->SessionType &&
4112 !(opcode == ISCSI_OP_TEXT ||
4113 opcode == ISCSI_OP_LOGOUT)) {
4114 pr_err("Received illegal iSCSI Opcode: 0x%02x"
4115 " while in Discovery Session, rejecting.\n", opcode);
4116 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4117 buffer);
4118 goto transport_err;
4119 }
4120
4121 ret = iscsi_target_rx_opcode(conn, buffer);
4122 if (ret < 0)
4123 goto transport_err;
4124 }
4125
4126 transport_err:
4127 if (!signal_pending(current))
4128 atomic_set(&conn->transport_failed, 1);
4129 iscsit_take_action_for_connection_exit(conn);
4130 goto restart;
4131 out:
4132 return 0;
4133 }
4134
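/*
 * Illustrative sketch only, not part of the driver: the HeaderDigest
 * mismatch handling from the receive loop above, factored into a
 * standalone helper.  The function name is hypothetical; the computed
 * value is assumed to come from the same CRC32C hash over the 48-byte
 * header that iscsit_do_crypto_hash_buf() produces.
 */
static int example_check_header_digest(unsigned char *hdr, u32 received,
					u32 computed)
{
	if (received != computed) {
		/* poison the header so the opcode switch hits its default case */
		memset(hdr, 0xff, ISCSI_HDR_LEN);
		return -1;
	}
	return 0;
}
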
4135 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
4136 {
4137 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
4138 struct iscsi_session *sess = conn->sess;
4139 /*
4140 * We expect this function to only ever be called from either RX or TX
4141 * thread context via iscsit_close_connection() once the other context
4142 * has been reset and has returned to its sleeping pre-handler state.
4143 */
4144 spin_lock_bh(&conn->cmd_lock);
4145 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
4146
4147 list_del(&cmd->i_conn_node);
4148 spin_unlock_bh(&conn->cmd_lock);
4149
4150 iscsit_increment_maxcmdsn(cmd, sess);
4151
4152 iscsit_free_cmd(cmd, true);
4153
4154 spin_lock_bh(&conn->cmd_lock);
4155 }
4156 spin_unlock_bh(&conn->cmd_lock);
4157 }
4158
4159 static void iscsit_stop_timers_for_cmds(
4160 struct iscsi_conn *conn)
4161 {
4162 struct iscsi_cmd *cmd;
4163
4164 spin_lock_bh(&conn->cmd_lock);
4165 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4166 if (cmd->data_direction == DMA_TO_DEVICE)
4167 iscsit_stop_dataout_timer(cmd);
4168 }
4169 spin_unlock_bh(&conn->cmd_lock);
4170 }
4171
4172 int iscsit_close_connection(
4173 struct iscsi_conn *conn)
4174 {
4175 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4176 struct iscsi_session *sess = conn->sess;
4177
4178 pr_debug("Closing iSCSI connection CID %hu on SID:"
4179 " %u\n", conn->cid, sess->sid);
4180 /*
4181 * Always up conn_logout_comp just in case the RX Thread is sleeping
4182 * and the logout response never got sent because the connection
4183 * failed.
4184 */
4185 complete(&conn->conn_logout_comp);
4186
4187 iscsi_release_thread_set(conn);
4188
4189 iscsit_stop_timers_for_cmds(conn);
4190 iscsit_stop_nopin_response_timer(conn);
4191 iscsit_stop_nopin_timer(conn);
4192 iscsit_free_queue_reqs_for_conn(conn);
4193
4194 /*
4195 * During Connection recovery drop unacknowledged out of order
4196 * commands for this connection, and prepare the other commands
4197 * for reallegiance.
4198 *
4199 * During normal operation clear the out of order commands (but
4200 * do not free the struct iscsi_ooo_cmdsn's) and release all
4201 * struct iscsi_cmds.
4202 */
4203 if (atomic_read(&conn->connection_recovery)) {
4204 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4205 iscsit_prepare_cmds_for_realligance(conn);
4206 } else {
4207 iscsit_clear_ooo_cmdsns_for_conn(conn);
4208 iscsit_release_commands_from_conn(conn);
4209 }
4210
4211 /*
4212 * Handle decrementing session or connection usage count if
4213 * a logout response was not able to be sent because the
4214 * connection failed. Fall back to Session Recovery here.
4215 */
4216 if (atomic_read(&conn->conn_logout_remove)) {
4217 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4218 iscsit_dec_conn_usage_count(conn);
4219 iscsit_dec_session_usage_count(sess);
4220 }
4221 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4222 iscsit_dec_conn_usage_count(conn);
4223
4224 atomic_set(&conn->conn_logout_remove, 0);
4225 atomic_set(&sess->session_reinstatement, 0);
4226 atomic_set(&sess->session_fall_back_to_erl0, 1);
4227 }
4228
4229 spin_lock_bh(&sess->conn_lock);
4230 list_del(&conn->conn_list);
4231
4232 /*
4233 * Attempt to let the Initiator know this connection failed by
4234 * sending a Connection Dropped Async Message on another
4235 * active connection.
4236 */
4237 if (atomic_read(&conn->connection_recovery))
4238 iscsit_build_conn_drop_async_message(conn);
4239
4240 spin_unlock_bh(&sess->conn_lock);
4241
4242 /*
4243 * If connection reinstatement is being performed on this connection,
4244 * up the connection reinstatement semaphore that is being blocked on
4245 * in iscsit_cause_connection_reinstatement().
4246 */
4247 spin_lock_bh(&conn->state_lock);
4248 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4249 spin_unlock_bh(&conn->state_lock);
4250 complete(&conn->conn_wait_comp);
4251 wait_for_completion(&conn->conn_post_wait_comp);
4252 spin_lock_bh(&conn->state_lock);
4253 }
4254
4255 /*
4256 * If connection reinstatement is being performed on this connection
4257 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4258 * connection wait rcfr semaphore that is being blocked on
4259 * in iscsit_connection_reinstatement_rcfr().
4260 */
4261 if (atomic_read(&conn->connection_wait_rcfr)) {
4262 spin_unlock_bh(&conn->state_lock);
4263 complete(&conn->conn_wait_rcfr_comp);
4264 wait_for_completion(&conn->conn_post_wait_comp);
4265 spin_lock_bh(&conn->state_lock);
4266 }
4267 atomic_set(&conn->connection_reinstatement, 1);
4268 spin_unlock_bh(&conn->state_lock);
4269
4270 /*
4271 * If any other processes are accessing this connection pointer we
4272 * must wait until they have completed.
4273 */
4274 iscsit_check_conn_usage_count(conn);
4275
4276 if (conn->conn_rx_hash.tfm)
4277 crypto_free_hash(conn->conn_rx_hash.tfm);
4278 if (conn->conn_tx_hash.tfm)
4279 crypto_free_hash(conn->conn_tx_hash.tfm);
4280
4281 if (conn->conn_cpumask)
4282 free_cpumask_var(conn->conn_cpumask);
4283
4284 kfree(conn->conn_ops);
4285 conn->conn_ops = NULL;
4286
4287 if (conn->sock)
4288 sock_release(conn->sock);
4289
4290 if (conn->conn_transport->iscsit_free_conn)
4291 conn->conn_transport->iscsit_free_conn(conn);
4292
4293 iscsit_put_transport(conn->conn_transport);
4294
4295 conn->thread_set = NULL;
4296
4297 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4298 conn->conn_state = TARG_CONN_STATE_FREE;
4299 kfree(conn);
4300
4301 spin_lock_bh(&sess->conn_lock);
4302 atomic_dec(&sess->nconn);
4303 pr_debug("Decremented iSCSI connection count to %hu from node:"
4304 " %s\n", atomic_read(&sess->nconn),
4305 sess->sess_ops->InitiatorName);
4306 /*
4307 * Make sure that if one connection fails in a non-ERL=2 iSCSI
4308 * Session, they all fail.
4309 */
4310 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4311 !atomic_read(&sess->session_logout))
4312 atomic_set(&sess->session_fall_back_to_erl0, 1);
4313
4314 /*
4315 * If this was not the last connection in the session, and we are
4316 * performing session reinstatement or falling back to ERL=0, call
4317 * iscsit_stop_session() without sleeping to shutdown the other
4318 * active connections.
4319 */
4320 if (atomic_read(&sess->nconn)) {
4321 if (!atomic_read(&sess->session_reinstatement) &&
4322 !atomic_read(&sess->session_fall_back_to_erl0)) {
4323 spin_unlock_bh(&sess->conn_lock);
4324 return 0;
4325 }
4326 if (!atomic_read(&sess->session_stop_active)) {
4327 atomic_set(&sess->session_stop_active, 1);
4328 spin_unlock_bh(&sess->conn_lock);
4329 iscsit_stop_session(sess, 0, 0);
4330 return 0;
4331 }
4332 spin_unlock_bh(&sess->conn_lock);
4333 return 0;
4334 }
4335
4336 /*
4337 * If this was the last connection in the session and one of the
4338 * following is occurring:
4339 *
4340 * Session Reinstatement is not being performed and we are falling back
4341 * to ERL=0: call iscsit_close_session().
4342 *
4343 * Session Logout was requested. iscsit_close_session() will be called
4344 * elsewhere.
4345 *
4346 * Session Continuation is not being performed, start the Time2Retain
4347 * handler and check if sleep_on_sess_wait_comp is active.
4348 */
4349 if (!atomic_read(&sess->session_reinstatement) &&
4350 atomic_read(&sess->session_fall_back_to_erl0)) {
4351 spin_unlock_bh(&sess->conn_lock);
4352 target_put_session(sess->se_sess);
4353
4354 return 0;
4355 } else if (atomic_read(&sess->session_logout)) {
4356 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4357 sess->session_state = TARG_SESS_STATE_FREE;
4358 spin_unlock_bh(&sess->conn_lock);
4359
4360 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4361 complete(&sess->session_wait_comp);
4362
4363 return 0;
4364 } else {
4365 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4366 sess->session_state = TARG_SESS_STATE_FAILED;
4367
4368 if (!atomic_read(&sess->session_continuation)) {
4369 spin_unlock_bh(&sess->conn_lock);
4370 iscsit_start_time2retain_handler(sess);
4371 } else
4372 spin_unlock_bh(&sess->conn_lock);
4373
4374 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4375 complete(&sess->session_wait_comp);
4376
4377 return 0;
4378 }
4379 spin_unlock_bh(&sess->conn_lock);
4380
4381 return 0;
4382 }
4383
4384 int iscsit_close_session(struct iscsi_session *sess)
4385 {
4386 struct iscsi_portal_group *tpg = sess->tpg;
4387 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4388
4389 if (atomic_read(&sess->nconn)) {
4390 pr_err("%d connection(s) still exist for iSCSI session"
4391 " to %s\n", atomic_read(&sess->nconn),
4392 sess->sess_ops->InitiatorName);
4393 BUG();
4394 }
4395
4396 spin_lock_bh(&se_tpg->session_lock);
4397 atomic_set(&sess->session_logout, 1);
4398 atomic_set(&sess->session_reinstatement, 1);
4399 iscsit_stop_time2retain_timer(sess);
4400 spin_unlock_bh(&se_tpg->session_lock);
4401
4402 /*
4403 * transport_deregister_session_configfs() will clear the
4404 * struct se_node_acl->nacl_sess pointer now as an iscsi_np process context
4405 * can be setting it again with __transport_register_session() in
4406 * iscsi_post_login_handler() again after the iscsit_stop_session()
4407 * completes in iscsi_np context.
4408 */
4409 transport_deregister_session_configfs(sess->se_sess);
4410
4411 /*
4412 * If any other processes are accessing this session pointer we must
4413 * wait until they have completed. If we are in an interrupt (the
4414 * time2retain handler) and the session still has an active usage count, we
4415 * restart the timer and exit.
4416 */
4417 if (!in_interrupt()) {
4418 if (iscsit_check_session_usage_count(sess) == 1)
4419 iscsit_stop_session(sess, 1, 1);
4420 } else {
4421 if (iscsit_check_session_usage_count(sess) == 2) {
4422 atomic_set(&sess->session_logout, 0);
4423 iscsit_start_time2retain_handler(sess);
4424 return 0;
4425 }
4426 }
4427
4428 transport_deregister_session(sess->se_sess);
4429
4430 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4431 iscsit_free_connection_recovery_entires(sess);
4432
4433 iscsit_free_all_ooo_cmdsns(sess);
4434
4435 spin_lock_bh(&se_tpg->session_lock);
4436 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4437 sess->session_state = TARG_SESS_STATE_FREE;
4438 pr_debug("Released iSCSI session from node: %s\n",
4439 sess->sess_ops->InitiatorName);
4440 tpg->nsessions--;
4441 if (tpg->tpg_tiqn)
4442 tpg->tpg_tiqn->tiqn_nsessions--;
4443
4444 pr_debug("Decremented number of active iSCSI Sessions on"
4445 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4446
4447 spin_lock(&sess_idr_lock);
4448 idr_remove(&sess_idr, sess->session_index);
4449 spin_unlock(&sess_idr_lock);
4450
4451 kfree(sess->sess_ops);
4452 sess->sess_ops = NULL;
4453 spin_unlock_bh(&se_tpg->session_lock);
4454
4455 kfree(sess);
4456 return 0;
4457 }
4458
4459 static void iscsit_logout_post_handler_closesession(
4460 struct iscsi_conn *conn)
4461 {
4462 struct iscsi_session *sess = conn->sess;
4463
4464 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4465 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4466
4467 atomic_set(&conn->conn_logout_remove, 0);
4468 complete(&conn->conn_logout_comp);
4469
4470 iscsit_dec_conn_usage_count(conn);
4471 iscsit_stop_session(sess, 1, 1);
4472 iscsit_dec_session_usage_count(sess);
4473 target_put_session(sess->se_sess);
4474 }
4475
4476 static void iscsit_logout_post_handler_samecid(
4477 struct iscsi_conn *conn)
4478 {
4479 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4480 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4481
4482 atomic_set(&conn->conn_logout_remove, 0);
4483 complete(&conn->conn_logout_comp);
4484
4485 iscsit_cause_connection_reinstatement(conn, 1);
4486 iscsit_dec_conn_usage_count(conn);
4487 }
4488
4489 static void iscsit_logout_post_handler_diffcid(
4490 struct iscsi_conn *conn,
4491 u16 cid)
4492 {
4493 struct iscsi_conn *l_conn = NULL, *c;
4494 struct iscsi_session *sess = conn->sess;
4495 
4496 if (!sess)
4497 return;
4498 
4499 spin_lock_bh(&sess->conn_lock);
4500 list_for_each_entry(c, &sess->sess_conn_list, conn_list) {
4501 if (c->cid == cid) {
4502 iscsit_inc_conn_usage_count(c);
4503 l_conn = c;
4504 break;
4505 }
4506 }
4507 spin_unlock_bh(&sess->conn_lock);
4508 if (!l_conn)
4509 return;
4510
4511 if (l_conn->sock)
4512 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4513
4514 spin_lock_bh(&l_conn->state_lock);
4515 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4516 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4517 spin_unlock_bh(&l_conn->state_lock);
4518
4519 iscsit_cause_connection_reinstatement(l_conn, 1);
4520 iscsit_dec_conn_usage_count(l_conn);
4521 }
4522
4523 /*
4524 * Return of 0 causes the TX thread to restart.
4525 */
4526 int iscsit_logout_post_handler(
4527 struct iscsi_cmd *cmd,
4528 struct iscsi_conn *conn)
4529 {
4530 int ret = 0;
4531
4532 switch (cmd->logout_reason) {
4533 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4534 switch (cmd->logout_response) {
4535 case ISCSI_LOGOUT_SUCCESS:
4536 case ISCSI_LOGOUT_CLEANUP_FAILED:
4537 default:
4538 iscsit_logout_post_handler_closesession(conn);
4539 break;
4540 }
4541 ret = 0;
4542 break;
4543 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4544 if (conn->cid == cmd->logout_cid) {
4545 switch (cmd->logout_response) {
4546 case ISCSI_LOGOUT_SUCCESS:
4547 case ISCSI_LOGOUT_CLEANUP_FAILED:
4548 default:
4549 iscsit_logout_post_handler_samecid(conn);
4550 break;
4551 }
4552 ret = 0;
4553 } else {
4554 switch (cmd->logout_response) {
4555 case ISCSI_LOGOUT_SUCCESS:
4556 iscsit_logout_post_handler_diffcid(conn,
4557 cmd->logout_cid);
4558 break;
4559 case ISCSI_LOGOUT_CID_NOT_FOUND:
4560 case ISCSI_LOGOUT_CLEANUP_FAILED:
4561 default:
4562 break;
4563 }
4564 ret = 1;
4565 }
4566 break;
4567 case ISCSI_LOGOUT_REASON_RECOVERY:
4568 switch (cmd->logout_response) {
4569 case ISCSI_LOGOUT_SUCCESS:
4570 case ISCSI_LOGOUT_CID_NOT_FOUND:
4571 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4572 case ISCSI_LOGOUT_CLEANUP_FAILED:
4573 default:
4574 break;
4575 }
4576 ret = 1;
4577 break;
4578 default:
4579 break;
4580
4581 }
4582 return ret;
4583 }
4584 EXPORT_SYMBOL(iscsit_logout_post_handler);
4585
4586 void iscsit_fail_session(struct iscsi_session *sess)
4587 {
4588 struct iscsi_conn *conn;
4589
4590 spin_lock_bh(&sess->conn_lock);
4591 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4592 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4593 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4594 }
4595 spin_unlock_bh(&sess->conn_lock);
4596
4597 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4598 sess->session_state = TARG_SESS_STATE_FAILED;
4599 }
4600
4601 int iscsit_free_session(struct iscsi_session *sess)
4602 {
4603 u16 conn_count = atomic_read(&sess->nconn);
4604 struct iscsi_conn *conn, *conn_tmp = NULL;
4605 int is_last;
4606
4607 spin_lock_bh(&sess->conn_lock);
4608 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4609
4610 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4611 conn_list) {
4612 if (conn_count == 0)
4613 break;
4614
4615 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4616 is_last = 1;
4617 } else {
4618 iscsit_inc_conn_usage_count(conn_tmp);
4619 is_last = 0;
4620 }
4621 iscsit_inc_conn_usage_count(conn);
4622
4623 spin_unlock_bh(&sess->conn_lock);
4624 iscsit_cause_connection_reinstatement(conn, 1);
4625 spin_lock_bh(&sess->conn_lock);
4626
4627 iscsit_dec_conn_usage_count(conn);
4628 if (is_last == 0)
4629 iscsit_dec_conn_usage_count(conn_tmp);
4630
4631 conn_count--;
4632 }
4633
4634 if (atomic_read(&sess->nconn)) {
4635 spin_unlock_bh(&sess->conn_lock);
4636 wait_for_completion(&sess->session_wait_comp);
4637 } else
4638 spin_unlock_bh(&sess->conn_lock);
4639
4640 target_put_session(sess->se_sess);
4641 return 0;
4642 }
4643
4644 void iscsit_stop_session(
4645 struct iscsi_session *sess,
4646 int session_sleep,
4647 int connection_sleep)
4648 {
4649 u16 conn_count = atomic_read(&sess->nconn);
4650 struct iscsi_conn *conn, *conn_tmp = NULL;
4651 int is_last;
4652
4653 spin_lock_bh(&sess->conn_lock);
4654 if (session_sleep)
4655 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4656
4657 if (connection_sleep) {
4658 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4659 conn_list) {
4660 if (conn_count == 0)
4661 break;
4662
4663 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4664 is_last = 1;
4665 } else {
4666 iscsit_inc_conn_usage_count(conn_tmp);
4667 is_last = 0;
4668 }
4669 iscsit_inc_conn_usage_count(conn);
4670
4671 spin_unlock_bh(&sess->conn_lock);
4672 iscsit_cause_connection_reinstatement(conn, 1);
4673 spin_lock_bh(&sess->conn_lock);
4674
4675 iscsit_dec_conn_usage_count(conn);
4676 if (is_last == 0)
4677 iscsit_dec_conn_usage_count(conn_tmp);
4678 conn_count--;
4679 }
4680 } else {
4681 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4682 iscsit_cause_connection_reinstatement(conn, 0);
4683 }
4684
4685 if (session_sleep && atomic_read(&sess->nconn)) {
4686 spin_unlock_bh(&sess->conn_lock);
4687 wait_for_completion(&sess->session_wait_comp);
4688 } else
4689 spin_unlock_bh(&sess->conn_lock);
4690 }
4691
4692 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4693 {
4694 struct iscsi_session *sess;
4695 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4696 struct se_session *se_sess, *se_sess_tmp;
4697 int session_count = 0;
4698
4699 spin_lock_bh(&se_tpg->session_lock);
4700 if (tpg->nsessions && !force) {
4701 spin_unlock_bh(&se_tpg->session_lock);
4702 return -1;
4703 }
4704
4705 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4706 sess_list) {
4707 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4708
4709 spin_lock(&sess->conn_lock);
4710 if (atomic_read(&sess->session_fall_back_to_erl0) ||
4711 atomic_read(&sess->session_logout) ||
4712 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4713 spin_unlock(&sess->conn_lock);
4714 continue;
4715 }
4716 atomic_set(&sess->session_reinstatement, 1);
4717 spin_unlock(&sess->conn_lock);
4718 spin_unlock_bh(&se_tpg->session_lock);
4719
4720 iscsit_free_session(sess);
4721 spin_lock_bh(&se_tpg->session_lock);
4722
4723 session_count++;
4724 }
4725 spin_unlock_bh(&se_tpg->session_lock);
4726
4727 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4728 " Group: %hu\n", session_count, tpg->tpgt);
4729 return 0;
4730 }
4731
4732 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4733 MODULE_VERSION("4.1.x");
4734 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4735 MODULE_LICENSE("GPL");
4736
4737 module_init(iscsi_target_init_module);
4738 module_exit(iscsi_target_cleanup_module);