/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
			" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
			" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
			" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
			"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
			"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
			"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
			"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
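
/*
 * Example (illustrative sketch, not part of the original file): fabric
 * code typically reserves a unique statistics index while building a new
 * object.  "SCSI_SESSION_INDEX" below is assumed to be one of the
 * scsi_index_t types from target_core_base.h:
 *
 *	sess->session_index = scsi_get_new_index(SCSI_SESSION_INDEX);
 */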

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
			" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
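
/*
 * Example (sketch): a discovery-style session that never moves I/O can be
 * set up without a tag pool, e.g.:
 *
 *	se_sess = transport_init_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 */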

int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
			GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
		unsigned int tag_size,
		enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
			" %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
			" %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
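
/*
 * Example (sketch, with a hypothetical per-I/O "struct demo_cmd"): fabrics
 * that pre-allocate descriptors obtain and release tags from the percpu-ida
 * pool set up above roughly as follows:
 *
 *	int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 *	struct demo_cmd *cmd;
 *
 *	if (tag < 0)
 *		return NULL;
 *	cmd = &((struct demo_cmd *)se_sess->sess_cmd_map)[tag];
 *	...
 *	percpu_ida_free(&se_sess->sess_tag_pool, tag);
 */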

/*
 * Called with spin_lock_irqsave(&se_tpg->session_lock, flags) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
				&se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		unsigned int tag_num, unsigned int tag_size,
		enum target_prot_op prot_op,
		const char *initiatorname, void *private,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);
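
/*
 * Example (sketch): a fabric login path can allocate, NodeACL-check and
 * register a session in one call.  "demo_sess_cb" is a hypothetical
 * callback doing fabric-private setup before registration:
 *
 *	sess = target_alloc_session(&tpg->se_tpg, 128,
 *			sizeof(struct demo_cmd), TARGET_PROT_NORMAL,
 *			initiator_wwn, fabric_sess, demo_sess_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */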

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		se_sess->se_node_acl = NULL;
		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool drop_nacl = false;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		se_sess->se_node_acl = NULL;
		kfree(se_nacl);
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
		bool write_pending)
{
	unsigned long flags;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release their
		 * internally allocated I/O reference and struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
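
/*
 * Example (sketch): a backend driver's I/O completion path hands the
 * finished command back to target-core with a SAM status, e.g. from a
 * bio end_io callback:
 *
 *	target_complete_cmd(cmd, SAM_STAT_GOOD);
 *
 * or, on a medium error:
 *
 *	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 */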

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
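
/*
 * Example (sketch): emulated INQUIRY/MODE SENSE style handlers that build
 * a payload possibly shorter than the initiator's allocation length use
 * the _with_length variant so the underflow residual is reported:
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rbuf_used_len);
 */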

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
			qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
			" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
			" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
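
/*
 * Example (sketch): a caller walking an EVPD 0x83 response would decode
 * each designation descriptor "page_83" into a struct t10_vpd with the
 * four setters above, in this order:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */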

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
		unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}
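
/*
 * Worked example (assuming 4K PAGE_SIZE): a fabric advertising
 * max_data_sg_nents = 32 yields mtl = 32 * 4096 = 128K. A 256K READ with
 * no pre-existing overflow/underflow then takes the final else branch
 * above: SCF_UNDERFLOW_BIT is set, residual_count = 256K - 128K = 128K,
 * and cmd->data_length is clamped to 128K.
 */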

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
			cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
			" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 * se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
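
/*
 * Example (sketch, with hypothetical "demo" fields): a fabric receive
 * path typically sets the tag and submits in one shot; setup errors after
 * this point come back as CHECK_CONDITION via ->queue_status():
 *
 *	se_cmd->tag = demo->init_task_tag;
 *	rc = target_submit_cmd(se_cmd, se_sess, demo->cdb, demo->sense_buf,
 *			unpacked_lun, demo->data_len, TCM_SIMPLE_TAG,
 *			DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 *	if (rc)		// active I/O shutdown only
 *		demo_free_cmd(demo);
 */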

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
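
/*
 * Example (sketch): aborting a previously submitted command by its tag;
 * the TMR response is delivered back through ->queue_tm_rsp():
 *
 *	rc = target_submit_tmr(&demo->se_cmd, se_sess, demo->sense_buf,
 *			unpacked_lun, NULL, TMR_ABORT_TASK, GFP_KERNEL,
 *			ref_tag, TARGET_SCF_ACK_KREF);
 */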
1648
1649 /*
1650 * Handle SAM-esque emulation for generic transport request failures.
1651 */
1652 void transport_generic_request_failure(struct se_cmd *cmd,
1653 sense_reason_t sense_reason)
1654 {
1655 int ret = 0, post_ret = 0;
1656
1657 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1658 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1659 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1660 cmd->se_tfo->get_cmd_state(cmd),
1661 cmd->t_state, sense_reason);
1662 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1663 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1664 (cmd->transport_state & CMD_T_STOP) != 0,
1665 (cmd->transport_state & CMD_T_SENT) != 0);
1666
1667 /*
1668 * For SAM Task Attribute emulation for failed struct se_cmd
1669 */
1670 transport_complete_task_attr(cmd);
1671 /*
1672 * Handle special case for COMPARE_AND_WRITE failure, where the
1673 * callback is expected to drop the per device ->caw_sem.
1674 */
1675 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1676 cmd->transport_complete_callback)
1677 cmd->transport_complete_callback(cmd, false, &post_ret);
1678
1679 switch (sense_reason) {
1680 case TCM_NON_EXISTENT_LUN:
1681 case TCM_UNSUPPORTED_SCSI_OPCODE:
1682 case TCM_INVALID_CDB_FIELD:
1683 case TCM_INVALID_PARAMETER_LIST:
1684 case TCM_PARAMETER_LIST_LENGTH_ERROR:
1685 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1686 case TCM_UNKNOWN_MODE_PAGE:
1687 case TCM_WRITE_PROTECTED:
1688 case TCM_ADDRESS_OUT_OF_RANGE:
1689 case TCM_CHECK_CONDITION_ABORT_CMD:
1690 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1691 case TCM_CHECK_CONDITION_NOT_READY:
1692 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1693 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1694 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1695 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1696 case TCM_TOO_MANY_TARGET_DESCS:
1697 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1698 case TCM_TOO_MANY_SEGMENT_DESCS:
1699 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1700 break;
1701 case TCM_OUT_OF_RESOURCES:
1702 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1703 break;
1704 case TCM_RESERVATION_CONFLICT:
1705 /*
1706 * No SENSE Data payload for this case, set SCSI Status
1707 * and queue the response to $FABRIC_MOD.
1708 *
1709 * Uses linux/include/scsi/scsi.h SAM status codes defs
1710 */
1711 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1712 /*
1713 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1714 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1715 * CONFLICT STATUS.
1716 *
1717 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1718 */
1719 if (cmd->se_sess &&
1720 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1721 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1722 cmd->orig_fe_lun, 0x2C,
1723 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1724 }
1725 trace_target_cmd_complete(cmd);
1726 ret = cmd->se_tfo->queue_status(cmd);
1727 if (ret == -EAGAIN || ret == -ENOMEM)
1728 goto queue_full;
1729 goto check_stop;
1730 default:
1731 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1732 cmd->t_task_cdb[0], sense_reason);
1733 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1734 break;
1735 }
1736
1737 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1738 if (ret == -EAGAIN || ret == -ENOMEM)
1739 goto queue_full;
1740
1741 check_stop:
1742 transport_lun_remove_cmd(cmd);
1743 transport_cmd_check_stop_to_fabric(cmd);
1744 return;
1745
1746 queue_full:
1747 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1748 transport_handle_queue_full(cmd, cmd->se_dev);
1749 }
1750 EXPORT_SYMBOL(transport_generic_request_failure);
1751
1752 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1753 {
1754 sense_reason_t ret;
1755
1756 if (!cmd->execute_cmd) {
1757 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1758 goto err;
1759 }
1760 if (do_checks) {
1761 /*
1762 * Check for an existing UNIT ATTENTION condition after
1763 * target_handle_task_attr() has done SAM task attr
1764 * checking, and possibly have already defered execution
1765 * out to target_restart_delayed_cmds() context.
1766 */
1767 ret = target_scsi3_ua_check(cmd);
1768 if (ret)
1769 goto err;
1770
1771 ret = target_alua_state_check(cmd);
1772 if (ret)
1773 goto err;
1774
1775 ret = target_check_reservation(cmd);
1776 if (ret) {
1777 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1778 goto err;
1779 }
1780 }
1781
1782 ret = cmd->execute_cmd(cmd);
1783 if (!ret)
1784 return;
1785 err:
1786 spin_lock_irq(&cmd->t_state_lock);
1787 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1788 spin_unlock_irq(&cmd->t_state_lock);
1789
1790 transport_generic_request_failure(cmd, ret);
1791 }
1792
1793 static int target_write_prot_action(struct se_cmd *cmd)
1794 {
1795 u32 sectors;
1796 /*
1797 * Perform WRITE_INSERT of PI using software emulation when backend
1798 * device has PI enabled, if the transport has not already generated
1799 * PI using hardware WRITE_INSERT offload.
1800 */
1801 switch (cmd->prot_op) {
1802 case TARGET_PROT_DOUT_INSERT:
1803 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1804 sbc_dif_generate(cmd);
1805 break;
1806 case TARGET_PROT_DOUT_STRIP:
1807 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1808 break;
1809
1810 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1811 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1812 sectors, 0, cmd->t_prot_sg, 0);
1813 if (unlikely(cmd->pi_err)) {
1814 spin_lock_irq(&cmd->t_state_lock);
1815 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1816 spin_unlock_irq(&cmd->t_state_lock);
1817 transport_generic_request_failure(cmd, cmd->pi_err);
1818 return -1;
1819 }
1820 break;
1821 default:
1822 break;
1823 }
1824
1825 return 0;
1826 }
1827
1828 static bool target_handle_task_attr(struct se_cmd *cmd)
1829 {
1830 struct se_device *dev = cmd->se_dev;
1831
1832 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1833 return false;
1834
1835 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1836
1837 /*
1838 * Check for the existence of HEAD_OF_QUEUE, and if true return false
1839 * so the command is dispatched immediately, ahead of any queued commands.
1840 */
1841 switch (cmd->sam_task_attr) {
1842 case TCM_HEAD_TAG:
1843 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1844 cmd->t_task_cdb[0]);
1845 return false;
1846 case TCM_ORDERED_TAG:
1847 atomic_inc_mb(&dev->dev_ordered_sync);
1848
1849 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1850 cmd->t_task_cdb[0]);
1851
1852 /*
1853 * Execute an ORDERED command if no other older commands
1854 * exist that need to be completed first.
1855 */
1856 if (!atomic_read(&dev->simple_cmds))
1857 return false;
1858 break;
1859 default:
1860 /*
1861 * For SIMPLE and UNTAGGED Task Attribute commands
1862 */
1863 atomic_inc_mb(&dev->simple_cmds);
1864 break;
1865 }
1866
1867 if (atomic_read(&dev->dev_ordered_sync) == 0)
1868 return false;
1869
1870 spin_lock(&dev->delayed_cmd_lock);
1871 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1872 spin_unlock(&dev->delayed_cmd_lock);
1873
1874 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
1875 cmd->t_task_cdb[0], cmd->sam_task_attr);
1876 return true;
1877 }
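/*
 * Illustrative sequence (not from the source; a sketch of how the
 * counters above interact):
 *
 *	SIMPLE  cmd A arrives -> simple_cmds = 1, executes immediately
 *	ORDERED cmd B arrives -> dev_ordered_sync = 1; simple_cmds != 0,
 *	                         so B parks on delayed_cmd_list
 *	SIMPLE  cmd C arrives -> dev_ordered_sync != 0, so C parks
 *	                         behind B, preserving ordering
 *	A completes           -> transport_complete_task_attr() restarts
 *	                         the delayed list: B runs, then C
 */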
1878
1879 static int __transport_check_aborted_status(struct se_cmd *, int);
1880
1881 void target_execute_cmd(struct se_cmd *cmd)
1882 {
1883 /*
1884 * Determine if frontend context caller is requesting the stopping of
1885 * this command for frontend exceptions.
1886 *
1887 * If the received CDB has already been aborted, stop processing it here.
1888 */
1889 spin_lock_irq(&cmd->t_state_lock);
1890 if (__transport_check_aborted_status(cmd, 1)) {
1891 spin_unlock_irq(&cmd->t_state_lock);
1892 return;
1893 }
1894 if (cmd->transport_state & CMD_T_STOP) {
1895 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1896 __func__, __LINE__, cmd->tag);
1897
1898 spin_unlock_irq(&cmd->t_state_lock);
1899 complete_all(&cmd->t_transport_stop_comp);
1900 return;
1901 }
1902
1903 cmd->t_state = TRANSPORT_PROCESSING;
1904 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1905 spin_unlock_irq(&cmd->t_state_lock);
1906
1907 if (target_write_prot_action(cmd))
1908 return;
1909
1910 if (target_handle_task_attr(cmd)) {
1911 spin_lock_irq(&cmd->t_state_lock);
1912 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1913 spin_unlock_irq(&cmd->t_state_lock);
1914 return;
1915 }
1916
1917 __target_execute_cmd(cmd, true);
1918 }
1919 EXPORT_SYMBOL(target_execute_cmd);
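/*
 * Usage sketch (hypothetical fabric driver; the my_fabric_* names are
 * invented for illustration): a fabric that defers data transfer
 * typically re-enters the core here once the WRITE payload it was
 * asked for via ->write_pending() has arrived:
 *
 *	static void my_fabric_write_data_done(struct my_fabric_cmd *fcmd)
 *	{
 *		struct se_cmd *se_cmd = &fcmd->se_cmd;
 *
 *		// Payload SGLs are populated; hand the command back to
 *		// the core for task attribute checks and execution.
 *		target_execute_cmd(se_cmd);
 *	}
 */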
1920
1921 /*
1922 * Process all commands up to the last received ORDERED task attribute which
1923 * requires another blocking boundary
1924 */
1925 static void target_restart_delayed_cmds(struct se_device *dev)
1926 {
1927 for (;;) {
1928 struct se_cmd *cmd;
1929
1930 spin_lock(&dev->delayed_cmd_lock);
1931 if (list_empty(&dev->delayed_cmd_list)) {
1932 spin_unlock(&dev->delayed_cmd_lock);
1933 break;
1934 }
1935
1936 cmd = list_entry(dev->delayed_cmd_list.next,
1937 struct se_cmd, se_delayed_node);
1938 list_del(&cmd->se_delayed_node);
1939 spin_unlock(&dev->delayed_cmd_lock);
1940
1941 __target_execute_cmd(cmd, true);
1942
1943 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1944 break;
1945 }
1946 }
1947
1948 /*
1949 * Called from I/O completion to determine which dormant/delayed
1950 * and ordered cmds need to have their tasks added to the execution queue.
1951 */
1952 static void transport_complete_task_attr(struct se_cmd *cmd)
1953 {
1954 struct se_device *dev = cmd->se_dev;
1955
1956 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1957 return;
1958
1959 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
1960 goto restart;
1961
1962 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1963 atomic_dec_mb(&dev->simple_cmds);
1964 dev->dev_cur_ordered_id++;
1965 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
1966 dev->dev_cur_ordered_id);
1967 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1968 dev->dev_cur_ordered_id++;
1969 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
1970 dev->dev_cur_ordered_id);
1971 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1972 atomic_dec_mb(&dev->dev_ordered_sync);
1973
1974 dev->dev_cur_ordered_id++;
1975 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
1976 dev->dev_cur_ordered_id);
1977 }
1978 restart:
1979 target_restart_delayed_cmds(dev);
1980 }
1981
1982 static void transport_complete_qf(struct se_cmd *cmd)
1983 {
1984 int ret = 0;
1985
1986 transport_complete_task_attr(cmd);
1987
1988 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1989 trace_target_cmd_complete(cmd);
1990 ret = cmd->se_tfo->queue_status(cmd);
1991 goto out;
1992 }
1993
1994 switch (cmd->data_direction) {
1995 case DMA_FROM_DEVICE:
1996 if (cmd->scsi_status)
1997 goto queue_status;
1998
1999 trace_target_cmd_complete(cmd);
2000 ret = cmd->se_tfo->queue_data_in(cmd);
2001 break;
2002 case DMA_TO_DEVICE:
2003 if (cmd->se_cmd_flags & SCF_BIDI) {
2004 ret = cmd->se_tfo->queue_data_in(cmd);
2005 break;
2006 }
2007 /* Fall through for DMA_TO_DEVICE */
2008 case DMA_NONE:
2009 queue_status:
2010 trace_target_cmd_complete(cmd);
2011 ret = cmd->se_tfo->queue_status(cmd);
2012 break;
2013 default:
2014 break;
2015 }
2016
2017 out:
2018 if (ret < 0) {
2019 transport_handle_queue_full(cmd, cmd->se_dev);
2020 return;
2021 }
2022 transport_lun_remove_cmd(cmd);
2023 transport_cmd_check_stop_to_fabric(cmd);
2024 }
2025
2026 static void transport_handle_queue_full(
2027 struct se_cmd *cmd,
2028 struct se_device *dev)
2029 {
2030 spin_lock_irq(&dev->qf_cmd_lock);
2031 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2032 atomic_inc_mb(&dev->dev_qf_count);
2033 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2034
2035 schedule_work(&cmd->se_dev->qf_work_queue);
2036 }
2037
2038 static bool target_read_prot_action(struct se_cmd *cmd)
2039 {
2040 switch (cmd->prot_op) {
2041 case TARGET_PROT_DIN_STRIP:
2042 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2043 u32 sectors = cmd->data_length >>
2044 ilog2(cmd->se_dev->dev_attrib.block_size);
2045
2046 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2047 sectors, 0, cmd->t_prot_sg,
2048 0);
2049 if (cmd->pi_err)
2050 return true;
2051 }
2052 break;
2053 case TARGET_PROT_DIN_INSERT:
2054 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2055 break;
2056
2057 sbc_dif_generate(cmd);
2058 break;
2059 default:
2060 break;
2061 }
2062
2063 return false;
2064 }
2065
2066 static void target_complete_ok_work(struct work_struct *work)
2067 {
2068 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2069 int ret;
2070
2071 /*
2072 * Check if we need to move delayed/dormant tasks from cmds on the
2073 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2074 * Attribute.
2075 */
2076 transport_complete_task_attr(cmd);
2077
2078 /*
2079 * Check to schedule QUEUE_FULL work, or execute an existing
2080 * cmd->transport_qf_callback()
2081 */
2082 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2083 schedule_work(&cmd->se_dev->qf_work_queue);
2084
2085 /*
2086 * Check if we need to send a sense buffer from
2087 * the struct se_cmd in question.
2088 */
2089 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2090 WARN_ON(!cmd->scsi_status);
2091 ret = transport_send_check_condition_and_sense(
2092 cmd, 0, 1);
2093 if (ret == -EAGAIN || ret == -ENOMEM)
2094 goto queue_full;
2095
2096 transport_lun_remove_cmd(cmd);
2097 transport_cmd_check_stop_to_fabric(cmd);
2098 return;
2099 }
2100 /*
2101 * Check for a callback, used by, amongst other things,
2102 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2103 */
2104 if (cmd->transport_complete_callback) {
2105 sense_reason_t rc;
2106 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2107 bool zero_dl = !(cmd->data_length);
2108 int post_ret = 0;
2109
2110 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2111 if (!rc && !post_ret) {
2112 if (caw && zero_dl)
2113 goto queue_rsp;
2114
2115 return;
2116 } else if (rc) {
2117 ret = transport_send_check_condition_and_sense(cmd,
2118 rc, 0);
2119 if (ret == -EAGAIN || ret == -ENOMEM)
2120 goto queue_full;
2121
2122 transport_lun_remove_cmd(cmd);
2123 transport_cmd_check_stop_to_fabric(cmd);
2124 return;
2125 }
2126 }
2127
2128 queue_rsp:
2129 switch (cmd->data_direction) {
2130 case DMA_FROM_DEVICE:
2131 if (cmd->scsi_status)
2132 goto queue_status;
2133
2134 atomic_long_add(cmd->data_length,
2135 &cmd->se_lun->lun_stats.tx_data_octets);
2136 /*
2137 * Perform READ_STRIP of PI using software emulation when
2138 * backend has PI enabled, if the transport will not be
2139 * performing hardware READ_STRIP offload.
2140 */
2141 if (target_read_prot_action(cmd)) {
2142 ret = transport_send_check_condition_and_sense(cmd,
2143 cmd->pi_err, 0);
2144 if (ret == -EAGAIN || ret == -ENOMEM)
2145 goto queue_full;
2146
2147 transport_lun_remove_cmd(cmd);
2148 transport_cmd_check_stop_to_fabric(cmd);
2149 return;
2150 }
2151
2152 trace_target_cmd_complete(cmd);
2153 ret = cmd->se_tfo->queue_data_in(cmd);
2154 if (ret == -EAGAIN || ret == -ENOMEM)
2155 goto queue_full;
2156 break;
2157 case DMA_TO_DEVICE:
2158 atomic_long_add(cmd->data_length,
2159 &cmd->se_lun->lun_stats.rx_data_octets);
2160 /*
2161 * Check if we need to send READ payload for BIDI-COMMAND
2162 */
2163 if (cmd->se_cmd_flags & SCF_BIDI) {
2164 atomic_long_add(cmd->data_length,
2165 &cmd->se_lun->lun_stats.tx_data_octets);
2166 ret = cmd->se_tfo->queue_data_in(cmd);
2167 if (ret == -EAGAIN || ret == -ENOMEM)
2168 goto queue_full;
2169 break;
2170 }
2171 /* Fall through for DMA_TO_DEVICE */
2172 case DMA_NONE:
2173 queue_status:
2174 trace_target_cmd_complete(cmd);
2175 ret = cmd->se_tfo->queue_status(cmd);
2176 if (ret == -EAGAIN || ret == -ENOMEM)
2177 goto queue_full;
2178 break;
2179 default:
2180 break;
2181 }
2182
2183 transport_lun_remove_cmd(cmd);
2184 transport_cmd_check_stop_to_fabric(cmd);
2185 return;
2186
2187 queue_full:
2188 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2189 " data_direction: %d\n", cmd, cmd->data_direction);
2190 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2191 transport_handle_queue_full(cmd, cmd->se_dev);
2192 }
2193
2194 void target_free_sgl(struct scatterlist *sgl, int nents)
2195 {
2196 struct scatterlist *sg;
2197 int count;
2198
2199 for_each_sg(sgl, sg, nents, count)
2200 __free_page(sg_page(sg));
2201
2202 kfree(sgl);
2203 }
2204 EXPORT_SYMBOL(target_free_sgl);
2205
2206 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2207 {
2208 /*
2209 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2210 * emulation, and free + reset pointers if necessary.
2211 */
2212 if (!cmd->t_data_sg_orig)
2213 return;
2214
2215 kfree(cmd->t_data_sg);
2216 cmd->t_data_sg = cmd->t_data_sg_orig;
2217 cmd->t_data_sg_orig = NULL;
2218 cmd->t_data_nents = cmd->t_data_nents_orig;
2219 cmd->t_data_nents_orig = 0;
2220 }
2221
2222 static inline void transport_free_pages(struct se_cmd *cmd)
2223 {
2224 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2225 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2226 cmd->t_prot_sg = NULL;
2227 cmd->t_prot_nents = 0;
2228 }
2229
2230 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2231 /*
2232 * Release special case READ buffer payload required for
2233 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2234 */
2235 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2236 target_free_sgl(cmd->t_bidi_data_sg,
2237 cmd->t_bidi_data_nents);
2238 cmd->t_bidi_data_sg = NULL;
2239 cmd->t_bidi_data_nents = 0;
2240 }
2241 transport_reset_sgl_orig(cmd);
2242 return;
2243 }
2244 transport_reset_sgl_orig(cmd);
2245
2246 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2247 cmd->t_data_sg = NULL;
2248 cmd->t_data_nents = 0;
2249
2250 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2251 cmd->t_bidi_data_sg = NULL;
2252 cmd->t_bidi_data_nents = 0;
2253 }
2254
2255 /**
2256 * transport_put_cmd - release a reference to a command
2257 * @cmd: command to release
2258 *
2259 * This routine releases our reference to the command and frees it if possible.
2260 */
2261 static int transport_put_cmd(struct se_cmd *cmd)
2262 {
2263 BUG_ON(!cmd->se_tfo);
2264 /*
2265 * If this cmd has been setup with target_get_sess_cmd(), drop
2266 * the kref and call ->release_cmd() in kref callback.
2267 */
2268 return target_put_sess_cmd(cmd);
2269 }
2270
2271 void *transport_kmap_data_sg(struct se_cmd *cmd)
2272 {
2273 struct scatterlist *sg = cmd->t_data_sg;
2274 struct page **pages;
2275 int i;
2276
2277 /*
2278 * We need to take into account a possible offset here for fabrics like
2279 * tcm_loop, which may be using a contig buffer from the SCSI midlayer for
2280 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2281 */
2282 if (!cmd->t_data_nents)
2283 return NULL;
2284
2285 BUG_ON(!sg);
2286 if (cmd->t_data_nents == 1)
2287 return kmap(sg_page(sg)) + sg->offset;
2288
2289 /* >1 page. use vmap */
2290 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2291 if (!pages)
2292 return NULL;
2293
2294 /* convert sg[] to pages[] */
2295 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2296 pages[i] = sg_page(sg);
2297 }
2298
2299 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2300 kfree(pages);
2301 if (!cmd->t_data_vmap)
2302 return NULL;
2303
2304 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2305 }
2306 EXPORT_SYMBOL(transport_kmap_data_sg);
2307
2308 void transport_kunmap_data_sg(struct se_cmd *cmd)
2309 {
2310 if (!cmd->t_data_nents) {
2311 return;
2312 } else if (cmd->t_data_nents == 1) {
2313 kunmap(sg_page(cmd->t_data_sg));
2314 return;
2315 }
2316
2317 vunmap(cmd->t_data_vmap);
2318 cmd->t_data_vmap = NULL;
2319 }
2320 EXPORT_SYMBOL(transport_kunmap_data_sg);
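/*
 * Usage sketch (hypothetical caller, illustration only): the kmap/kunmap
 * helpers above are used as a strict pair around short-lived access to
 * the command payload, e.g. when parsing a parameter list:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	// ... inspect or copy the linear view of the payload ...
 *	transport_kunmap_data_sg(cmd);
 *
 * The mapping may be backed by kmap() (single SG entry) or vmap(), so
 * the pointer must not be cached past the matching kunmap call.
 */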
2321
2322 int
2323 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2324 bool zero_page, bool chainable)
2325 {
2326 struct scatterlist *sg;
2327 struct page *page;
2328 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2329 unsigned int nalloc, nent;
2330 int i = 0;
2331
2332 nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
2333 if (chainable)
2334 nalloc++;
2335 sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
2336 if (!sg)
2337 return -ENOMEM;
2338
2339 sg_init_table(sg, nalloc);
2340
2341 while (length) {
2342 u32 page_len = min_t(u32, length, PAGE_SIZE);
2343 page = alloc_page(GFP_KERNEL | zero_flag);
2344 if (!page)
2345 goto out;
2346
2347 sg_set_page(&sg[i], page, page_len, 0);
2348 length -= page_len;
2349 i++;
2350 }
2351 *sgl = sg;
2352 *nents = nent;
2353 return 0;
2354
2355 out:
2356 while (i > 0) {
2357 i--;
2358 __free_page(sg_page(&sg[i]));
2359 }
2360 kfree(sg);
2361 return -ENOMEM;
2362 }
2363 EXPORT_SYMBOL(target_alloc_sgl);
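/*
 * Usage sketch (illustrative values): allocate a zeroed 3-page
 * scatterlist and release it with target_free_sgl():
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, 3 * PAGE_SIZE, true, false) < 0)
 *		return -ENOMEM;
 *	// nents == 3; each entry maps one zeroed page
 *	target_free_sgl(sgl, nents);
 *
 * Passing chainable == true allocates one extra slot so the caller can
 * sg_chain() this table onto another one.
 */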
2364
2365 /*
2366 * Allocate any required resources to execute the command. For writes we
2367 * might not have the payload yet, so notify the fabric via a call to
2368 * ->write_pending instead. Otherwise place it on the execution queue.
2369 */
2370 sense_reason_t
2371 transport_generic_new_cmd(struct se_cmd *cmd)
2372 {
2373 int ret = 0;
2374 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2375
2376 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2377 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2378 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2379 cmd->prot_length, true, false);
2380 if (ret < 0)
2381 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2382 }
2383
2384 /*
2385 * Determine if the TCM fabric module has already allocated physical
2386 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2387 * beforehand.
2388 */
2389 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2390 cmd->data_length) {
2391
2392 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2393 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2394 u32 bidi_length;
2395
2396 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2397 bidi_length = cmd->t_task_nolb *
2398 cmd->se_dev->dev_attrib.block_size;
2399 else
2400 bidi_length = cmd->data_length;
2401
2402 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2403 &cmd->t_bidi_data_nents,
2404 bidi_length, zero_flag, false);
2405 if (ret < 0)
2406 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2407 }
2408
2409 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2410 cmd->data_length, zero_flag, false);
2411 if (ret < 0)
2412 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2413 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2414 cmd->data_length) {
2415 /*
2416 * Special case for COMPARE_AND_WRITE with fabrics
2417 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2418 */
2419 u32 caw_length = cmd->t_task_nolb *
2420 cmd->se_dev->dev_attrib.block_size;
2421
2422 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2423 &cmd->t_bidi_data_nents,
2424 caw_length, zero_flag, false);
2425 if (ret < 0)
2426 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2427 }
2428 /*
2429 * If this command is not a write we can execute it right here,
2430 * for write buffers we need to notify the fabric driver first
2431 * and let it call back once the write buffers are ready.
2432 */
2433 target_add_to_state_list(cmd);
2434 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2435 target_execute_cmd(cmd);
2436 return 0;
2437 }
2438 transport_cmd_check_stop(cmd, false, true);
2439
2440 ret = cmd->se_tfo->write_pending(cmd);
2441 if (ret == -EAGAIN || ret == -ENOMEM)
2442 goto queue_full;
2443
2444 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2445 WARN_ON(ret);
2446
2447 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448
2449 queue_full:
2450 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2451 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2452 transport_handle_queue_full(cmd, cmd->se_dev);
2453 return 0;
2454 }
2455 EXPORT_SYMBOL(transport_generic_new_cmd);
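/*
 * Flow sketch (hedged summary of the WRITE path above): DMA_TO_DEVICE
 * commands with a payload take a round trip through the fabric before
 * execution:
 *
 *	transport_generic_new_cmd(cmd)
 *	    -> cmd->se_tfo->write_pending(cmd)   // fabric fetches data
 *	         ... DMA / network transfer of the WRITE payload ...
 *	    <- fabric calls target_execute_cmd(cmd) once buffers are ready
 *
 * READs and zero-length commands skip this handshake and are executed
 * directly via target_execute_cmd().
 */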
2456
2457 static void transport_write_pending_qf(struct se_cmd *cmd)
2458 {
2459 int ret;
2460
2461 ret = cmd->se_tfo->write_pending(cmd);
2462 if (ret == -EAGAIN || ret == -ENOMEM) {
2463 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2464 cmd);
2465 transport_handle_queue_full(cmd, cmd->se_dev);
2466 }
2467 }
2468
2469 static bool
2470 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2471 unsigned long *flags);
2472
2473 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2474 {
2475 unsigned long flags;
2476
2477 spin_lock_irqsave(&cmd->t_state_lock, flags);
2478 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2479 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2480 }
2481
2482 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2483 {
2484 int ret = 0;
2485 bool aborted = false, tas = false;
2486
2487 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2488 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2489 target_wait_free_cmd(cmd, &aborted, &tas);
2490
2491 if (!aborted || tas)
2492 ret = transport_put_cmd(cmd);
2493 } else {
2494 if (wait_for_tasks)
2495 target_wait_free_cmd(cmd, &aborted, &tas);
2496 /*
2497 * Handle WRITE failure case where transport_generic_new_cmd()
2498 * has already added se_cmd to state_list, but the fabric has
2499 * failed the command before I/O submission.
2500 */
2501 if (cmd->state_active)
2502 target_remove_from_state_list(cmd);
2503
2504 if (cmd->se_lun)
2505 transport_lun_remove_cmd(cmd);
2506
2507 if (!aborted || tas)
2508 ret = transport_put_cmd(cmd);
2509 }
2510 /*
2511 * If the task has been internally aborted due to TMR ABORT_TASK
2512 * or LUN_RESET, target_core_tmr.c is responsible for performing
2513 * the remaining calls to target_put_sess_cmd(), and not the
2514 * callers of this function.
2515 */
2516 if (aborted) {
2517 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2518 wait_for_completion(&cmd->cmd_wait_comp);
2519 cmd->se_tfo->release_cmd(cmd);
2520 ret = 1;
2521 }
2522 return ret;
2523 }
2524 EXPORT_SYMBOL(transport_generic_free_cmd);
2525
2526 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2527 * @se_cmd: command descriptor to add
2528 * @ack_kref: Signal that the fabric will perform an acknowledging target_put_sess_cmd()
2529 */
2530 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2531 {
2532 struct se_session *se_sess = se_cmd->se_sess;
2533 unsigned long flags;
2534 int ret = 0;
2535
2536 /*
2537 * Add a second kref if the fabric caller is expecting to handle
2538 * fabric acknowledgement that requires two target_put_sess_cmd()
2539 * invocations before se_cmd descriptor release.
2540 */
2541 if (ack_kref) {
2542 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2543 return -EINVAL;
2544
2545 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2546 }
2547
2548 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2549 if (se_sess->sess_tearing_down) {
2550 ret = -ESHUTDOWN;
2551 goto out;
2552 }
2553 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2554 out:
2555 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2556
2557 if (ret && ack_kref)
2558 target_put_sess_cmd(se_cmd);
2559
2560 return ret;
2561 }
2562 EXPORT_SYMBOL(target_get_sess_cmd);
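/*
 * Reference counting sketch (illustration; exact call sites vary per
 * fabric): with ack_kref == true, and assuming the initial reference
 * taken at command setup, the descriptor holds two references and both
 * must be dropped before ->release_cmd() fires:
 *
 *	target_get_sess_cmd(se_cmd, true);	// kref 1 -> 2, SCF_ACK_KREF
 *	...
 *	target_put_sess_cmd(se_cmd);		// I/O completion path
 *	target_put_sess_cmd(se_cmd);		// fabric ack, releases se_cmd
 */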
2563
2564 static void target_free_cmd_mem(struct se_cmd *cmd)
2565 {
2566 transport_free_pages(cmd);
2567
2568 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2569 core_tmr_release_req(cmd->se_tmr_req);
2570 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2571 kfree(cmd->t_task_cdb);
2572 }
2573
2574 static void target_release_cmd_kref(struct kref *kref)
2575 {
2576 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2577 struct se_session *se_sess = se_cmd->se_sess;
2578 unsigned long flags;
2579 bool fabric_stop;
2580
2581 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2582
2583 spin_lock(&se_cmd->t_state_lock);
2584 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
2585 (se_cmd->transport_state & CMD_T_ABORTED);
2586 spin_unlock(&se_cmd->t_state_lock);
2587
2588 if (se_cmd->cmd_wait_set || fabric_stop) {
2589 list_del_init(&se_cmd->se_cmd_list);
2590 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2591 target_free_cmd_mem(se_cmd);
2592 complete(&se_cmd->cmd_wait_comp);
2593 return;
2594 }
2595 list_del_init(&se_cmd->se_cmd_list);
2596 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2597
2598 target_free_cmd_mem(se_cmd);
2599 se_cmd->se_tfo->release_cmd(se_cmd);
2600 }
2601
2602 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2603 * @se_cmd: command descriptor to drop
2604 */
2605 int target_put_sess_cmd(struct se_cmd *se_cmd)
2606 {
2607 struct se_session *se_sess = se_cmd->se_sess;
2608
2609 if (!se_sess) {
2610 target_free_cmd_mem(se_cmd);
2611 se_cmd->se_tfo->release_cmd(se_cmd);
2612 return 1;
2613 }
2614 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2615 }
2616 EXPORT_SYMBOL(target_put_sess_cmd);
2617
2618 /* target_sess_cmd_list_set_waiting - Flag all commands in
2619 * sess_cmd_list to complete cmd_wait_comp. Set
2620 * sess_tearing_down so no more commands are queued.
2621 * @se_sess: session to flag
2622 */
2623 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2624 {
2625 struct se_cmd *se_cmd, *tmp_cmd;
2626 unsigned long flags;
2627 int rc;
2628
2629 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2630 if (se_sess->sess_tearing_down) {
2631 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2632 return;
2633 }
2634 se_sess->sess_tearing_down = 1;
2635 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2636
2637 list_for_each_entry_safe(se_cmd, tmp_cmd,
2638 &se_sess->sess_wait_list, se_cmd_list) {
2639 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2640 if (rc) {
2641 se_cmd->cmd_wait_set = 1;
2642 spin_lock(&se_cmd->t_state_lock);
2643 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2644 spin_unlock(&se_cmd->t_state_lock);
2645 } else
2646 list_del_init(&se_cmd->se_cmd_list);
2647 }
2648
2649 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2650 }
2651 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2652
2653 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
2654 * @se_sess: session to wait for active I/O
2655 */
2656 void target_wait_for_sess_cmds(struct se_session *se_sess)
2657 {
2658 struct se_cmd *se_cmd, *tmp_cmd;
2659 unsigned long flags;
2660 bool tas;
2661
2662 list_for_each_entry_safe(se_cmd, tmp_cmd,
2663 &se_sess->sess_wait_list, se_cmd_list) {
2664 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2665 " %d\n", se_cmd, se_cmd->t_state,
2666 se_cmd->se_tfo->get_cmd_state(se_cmd));
2667
2668 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2669 tas = (se_cmd->transport_state & CMD_T_TAS);
2670 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2671
2672 if (!target_put_sess_cmd(se_cmd)) {
2673 if (tas)
2674 target_put_sess_cmd(se_cmd);
2675 }
2676
2677 wait_for_completion(&se_cmd->cmd_wait_comp);
2678 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2679 " fabric state: %d\n", se_cmd, se_cmd->t_state,
2680 se_cmd->se_tfo->get_cmd_state(se_cmd));
2681
2682 se_cmd->se_tfo->release_cmd(se_cmd);
2683 }
2684
2685 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2686 WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2687 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2688
2689 }
2690 EXPORT_SYMBOL(target_wait_for_sess_cmds);
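/*
 * Teardown sketch (hypothetical fabric shutdown path; the function name
 * is invented for illustration): the two calls above are intended to be
 * used in sequence when a session goes away:
 *
 *	static void my_fabric_close_session(struct se_session *se_sess)
 *	{
 *		target_sess_cmd_list_set_waiting(se_sess); // stop new cmds
 *		target_wait_for_sess_cmds(se_sess);        // drain in-flight
 *		transport_deregister_session(se_sess);
 *	}
 */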
2691
2692 void transport_clear_lun_ref(struct se_lun *lun)
2693 {
2694 percpu_ref_kill(&lun->lun_ref);
2695 wait_for_completion(&lun->lun_ref_comp);
2696 }
2697
2698 static bool
2699 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2700 bool *aborted, bool *tas, unsigned long *flags)
2701 __releases(&cmd->t_state_lock)
2702 __acquires(&cmd->t_state_lock)
2703 {
2704
2705 assert_spin_locked(&cmd->t_state_lock);
2706 WARN_ON_ONCE(!irqs_disabled());
2707
2708 if (fabric_stop)
2709 cmd->transport_state |= CMD_T_FABRIC_STOP;
2710
2711 if (cmd->transport_state & CMD_T_ABORTED)
2712 *aborted = true;
2713
2714 if (cmd->transport_state & CMD_T_TAS)
2715 *tas = true;
2716
2717 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2718 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2719 return false;
2720
2721 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2722 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2723 return false;
2724
2725 if (!(cmd->transport_state & CMD_T_ACTIVE))
2726 return false;
2727
2728 if (fabric_stop && *aborted)
2729 return false;
2730
2731 cmd->transport_state |= CMD_T_STOP;
2732
2733 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2734 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2735 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2736
2737 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2738
2739 wait_for_completion(&cmd->t_transport_stop_comp);
2740
2741 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2742 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2743
2744 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2745 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2746
2747 return true;
2748 }
2749
2750 /**
2751 * transport_wait_for_tasks - wait for completion to occur
2752 * @cmd: command to wait on
2753 *
2754 * Called from frontend fabric context to wait for storage engine
2755 * to pause and/or release frontend generated struct se_cmd.
2756 */
2757 bool transport_wait_for_tasks(struct se_cmd *cmd)
2758 {
2759 unsigned long flags;
2760 bool ret, aborted = false, tas = false;
2761
2762 spin_lock_irqsave(&cmd->t_state_lock, flags);
2763 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2764 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2765
2766 return ret;
2767 }
2768 EXPORT_SYMBOL(transport_wait_for_tasks);
2769
2770 struct sense_info {
2771 u8 key;
2772 u8 asc;
2773 u8 ascq;
2774 bool add_sector_info;
2775 };
2776
2777 static const struct sense_info sense_info_table[] = {
2778 [TCM_NO_SENSE] = {
2779 .key = NOT_READY
2780 },
2781 [TCM_NON_EXISTENT_LUN] = {
2782 .key = ILLEGAL_REQUEST,
2783 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
2784 },
2785 [TCM_UNSUPPORTED_SCSI_OPCODE] = {
2786 .key = ILLEGAL_REQUEST,
2787 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2788 },
2789 [TCM_SECTOR_COUNT_TOO_MANY] = {
2790 .key = ILLEGAL_REQUEST,
2791 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2792 },
2793 [TCM_UNKNOWN_MODE_PAGE] = {
2794 .key = ILLEGAL_REQUEST,
2795 .asc = 0x24, /* INVALID FIELD IN CDB */
2796 },
2797 [TCM_CHECK_CONDITION_ABORT_CMD] = {
2798 .key = ABORTED_COMMAND,
2799 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
2800 .ascq = 0x03,
2801 },
2802 [TCM_INCORRECT_AMOUNT_OF_DATA] = {
2803 .key = ABORTED_COMMAND,
2804 .asc = 0x0c, /* WRITE ERROR */
2805 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
2806 },
2807 [TCM_INVALID_CDB_FIELD] = {
2808 .key = ILLEGAL_REQUEST,
2809 .asc = 0x24, /* INVALID FIELD IN CDB */
2810 },
2811 [TCM_INVALID_PARAMETER_LIST] = {
2812 .key = ILLEGAL_REQUEST,
2813 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2814 },
2815 [TCM_TOO_MANY_TARGET_DESCS] = {
2816 .key = ILLEGAL_REQUEST,
2817 .asc = 0x26,
2818 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
2819 },
2820 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
2821 .key = ILLEGAL_REQUEST,
2822 .asc = 0x26,
2823 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
2824 },
2825 [TCM_TOO_MANY_SEGMENT_DESCS] = {
2826 .key = ILLEGAL_REQUEST,
2827 .asc = 0x26,
2828 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
2829 },
2830 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
2831 .key = ILLEGAL_REQUEST,
2832 .asc = 0x26,
2833 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
2834 },
2835 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2836 .key = ILLEGAL_REQUEST,
2837 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
2838 },
2839 [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
2840 .key = ILLEGAL_REQUEST,
2841 .asc = 0x0c, /* WRITE ERROR */
2842 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
2843 },
2844 [TCM_SERVICE_CRC_ERROR] = {
2845 .key = ABORTED_COMMAND,
2846 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
2847 .ascq = 0x05, /* N/A */
2848 },
2849 [TCM_SNACK_REJECTED] = {
2850 .key = ABORTED_COMMAND,
2851 .asc = 0x11, /* READ ERROR */
2852 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
2853 },
2854 [TCM_WRITE_PROTECTED] = {
2855 .key = DATA_PROTECT,
2856 .asc = 0x27, /* WRITE PROTECTED */
2857 },
2858 [TCM_ADDRESS_OUT_OF_RANGE] = {
2859 .key = ILLEGAL_REQUEST,
2860 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2861 },
2862 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
2863 .key = UNIT_ATTENTION,
2864 },
2865 [TCM_CHECK_CONDITION_NOT_READY] = {
2866 .key = NOT_READY,
2867 },
2868 [TCM_MISCOMPARE_VERIFY] = {
2869 .key = MISCOMPARE,
2870 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
2871 .ascq = 0x00,
2872 },
2873 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
2874 .key = ABORTED_COMMAND,
2875 .asc = 0x10,
2876 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
2877 .add_sector_info = true,
2878 },
2879 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
2880 .key = ABORTED_COMMAND,
2881 .asc = 0x10,
2882 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2883 .add_sector_info = true,
2884 },
2885 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
2886 .key = ABORTED_COMMAND,
2887 .asc = 0x10,
2888 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2889 .add_sector_info = true,
2890 },
2891 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
2892 .key = COPY_ABORTED,
2893 .asc = 0x0d,
2894 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
2895
2896 },
2897 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2898 /*
2899 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2900 * Solaris initiators. Returning NOT READY instead means the
2901 * operations will be retried a finite number of times and we
2902 * can survive intermittent errors.
2903 */
2904 .key = NOT_READY,
2905 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
2906 },
2907 };
2908
2909 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
2910 {
2911 const struct sense_info *si;
2912 u8 *buffer = cmd->sense_buffer;
2913 int r = (__force int)reason;
2914 u8 asc, ascq;
2915 bool desc_format = target_sense_desc_format(cmd->se_dev);
2916
2917 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
2918 si = &sense_info_table[r];
2919 else
2920 si = &sense_info_table[(__force int)
2921 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
2922
2923 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
2924 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2925 WARN_ON_ONCE(asc == 0);
2926 } else if (si->asc == 0) {
2927 WARN_ON_ONCE(cmd->scsi_asc == 0);
2928 asc = cmd->scsi_asc;
2929 ascq = cmd->scsi_ascq;
2930 } else {
2931 asc = si->asc;
2932 ascq = si->ascq;
2933 }
2934
2935 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
2936 if (si->add_sector_info)
2937 return scsi_set_sense_information(buffer,
2938 cmd->scsi_sense_length,
2939 cmd->bad_sector);
2940
2941 return 0;
2942 }
2943
2944 int
2945 transport_send_check_condition_and_sense(struct se_cmd *cmd,
2946 sense_reason_t reason, int from_transport)
2947 {
2948 unsigned long flags;
2949
2950 spin_lock_irqsave(&cmd->t_state_lock, flags);
2951 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2952 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2953 return 0;
2954 }
2955 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
2956 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2957
2958 if (!from_transport) {
2959 int rc;
2960
2961 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2962 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2963 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2964 rc = translate_sense_reason(cmd, reason);
2965 if (rc)
2966 return rc;
2967 }
2968
2969 trace_target_cmd_complete(cmd);
2970 return cmd->se_tfo->queue_status(cmd);
2971 }
2972 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
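/*
 * Worked example (fixed-format sense assumed; bytes shown for
 * illustration): for reason == TCM_INVALID_CDB_FIELD the table above
 * yields key ILLEGAL_REQUEST (0x5), asc 0x24, ascq 0x00, and
 * scsi_build_sense_buffer() emits a buffer beginning:
 *
 *	0x70 0x00 0x05 0x00 0x00 0x00 0x00 0x0a   // response code, key, length
 *	0x00 0x00 0x00 0x00 0x24 0x00 ...         // asc, ascq
 *
 * The command then completes with SAM_STAT_CHECK_CONDITION.
 */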
2973
2974 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2975 __releases(&cmd->t_state_lock)
2976 __acquires(&cmd->t_state_lock)
2977 {
2978 assert_spin_locked(&cmd->t_state_lock);
2979 WARN_ON_ONCE(!irqs_disabled());
2980
2981 if (!(cmd->transport_state & CMD_T_ABORTED))
2982 return 0;
2983 /*
2984 * If cmd has been aborted but either no status is to be sent or it has
2985 * already been sent, just return
2986 */
2987 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
2988 if (send_status)
2989 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2990 return 1;
2991 }
2992
2993 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
2994 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
2995
2996 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2997 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2998 trace_target_cmd_complete(cmd);
2999
3000 spin_unlock_irq(&cmd->t_state_lock);
3001 cmd->se_tfo->queue_status(cmd);
3002 spin_lock_irq(&cmd->t_state_lock);
3003
3004 return 1;
3005 }
3006
3007 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3008 {
3009 int ret;
3010
3011 spin_lock_irq(&cmd->t_state_lock);
3012 ret = __transport_check_aborted_status(cmd, send_status);
3013 spin_unlock_irq(&cmd->t_state_lock);
3014
3015 return ret;
3016 }
3017 EXPORT_SYMBOL(transport_check_aborted_status);
3018
3019 void transport_send_task_abort(struct se_cmd *cmd)
3020 {
3021 unsigned long flags;
3022
3023 spin_lock_irqsave(&cmd->t_state_lock, flags);
3024 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3025 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3026 return;
3027 }
3028 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3029
3030 /*
3031 * If there are still expected incoming fabric WRITEs, we wait
3032 * until they have completed before sending a TASK_ABORTED
3033 * response. This response with TASK_ABORTED status will be
3034 * queued back to fabric module by transport_check_aborted_status().
3035 */
3036 if (cmd->data_direction == DMA_TO_DEVICE) {
3037 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3038 spin_lock_irqsave(&cmd->t_state_lock, flags);
3039 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3040 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3041 goto send_abort;
3042 }
3043 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3044 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3045 return;
3046 }
3047 }
3048 send_abort:
3049 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3050
3051 transport_lun_remove_cmd(cmd);
3052
3053 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3054 cmd->t_task_cdb[0], cmd->tag);
3055
3056 trace_target_cmd_complete(cmd);
3057 cmd->se_tfo->queue_status(cmd);
3058 }
3059
3060 static void target_tmr_work(struct work_struct *work)
3061 {
3062 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3063 struct se_device *dev = cmd->se_dev;
3064 struct se_tmr_req *tmr = cmd->se_tmr_req;
3065 unsigned long flags;
3066 int ret;
3067
3068 spin_lock_irqsave(&cmd->t_state_lock, flags);
3069 if (cmd->transport_state & CMD_T_ABORTED) {
3070 tmr->response = TMR_FUNCTION_REJECTED;
3071 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3072 goto check_stop;
3073 }
3074 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3075
3076 switch (tmr->function) {
3077 case TMR_ABORT_TASK:
3078 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3079 break;
3080 case TMR_ABORT_TASK_SET:
3081 case TMR_CLEAR_ACA:
3082 case TMR_CLEAR_TASK_SET:
3083 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3084 break;
3085 case TMR_LUN_RESET:
3086 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3087 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3088 TMR_FUNCTION_REJECTED;
3089 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3090 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3091 cmd->orig_fe_lun, 0x29,
3092 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3093 }
3094 break;
3095 case TMR_TARGET_WARM_RESET:
3096 tmr->response = TMR_FUNCTION_REJECTED;
3097 break;
3098 case TMR_TARGET_COLD_RESET:
3099 tmr->response = TMR_FUNCTION_REJECTED;
3100 break;
3101 default:
3102 pr_err("Uknown TMR function: 0x%02x.\n",
3103 tmr->function);
3104 tmr->response = TMR_FUNCTION_REJECTED;
3105 break;
3106 }
3107
3108 spin_lock_irqsave(&cmd->t_state_lock, flags);
3109 if (cmd->transport_state & CMD_T_ABORTED) {
3110 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3111 goto check_stop;
3112 }
3113 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3114 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3115
3116 cmd->se_tfo->queue_tm_rsp(cmd);
3117
3118 check_stop:
3119 transport_cmd_check_stop_to_fabric(cmd);
3120 }
3121
3122 int transport_generic_handle_tmr(
3123 struct se_cmd *cmd)
3124 {
3125 unsigned long flags;
3126
3127 spin_lock_irqsave(&cmd->t_state_lock, flags);
3128 cmd->transport_state |= CMD_T_ACTIVE;
3129 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3130
3131 INIT_WORK(&cmd->work, target_tmr_work);
3132 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3133 return 0;
3134 }
3135 EXPORT_SYMBOL(transport_generic_handle_tmr);
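/*
 * Dispatch sketch (hedged; the fabric-facing setup lives outside this
 * file): a LUN_RESET arriving from an initiator typically flows:
 *
 *	fabric receives TMF -> allocates se_cmd + se_tmr_req
 *	                       (e.g. via target_submit_tmr())
 *	core                -> transport_generic_handle_tmr(cmd)
 *	tmr_wq worker       -> target_tmr_work(): core_tmr_lun_reset()
 *	core                -> cmd->se_tfo->queue_tm_rsp(cmd) with
 *	                       tmr->response set as above
 */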
3136
3137 bool
3138 target_check_wce(struct se_device *dev)
3139 {
3140 bool wce = false;
3141
3142 if (dev->transport->get_write_cache)
3143 wce = dev->transport->get_write_cache(dev);
3144 else if (dev->dev_attrib.emulate_write_cache > 0)
3145 wce = true;
3146
3147 return wce;
3148 }
3149
3150 bool
3151 target_check_fua(struct se_device *dev)
3152 {
3153 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3154 }
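/*
 * Decision sketch (illustrative, loosely modelled on block backends):
 *
 *	bool need_fua = target_check_fua(dev) &&
 *			(cmd->se_cmd_flags & SCF_FUA);
 *
 * A WRITE only needs forced unit access when a volatile write cache is
 * present or emulated (target_check_wce()) and the initiator set the
 * FUA bit; with write-through behaviour every completed WRITE is
 * already durable.
 */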