/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_allocate_tasks(struct se_cmd *cmd,
		unsigned long long starting_lba,
		enum dma_data_direction data_direction,
		struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
		int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

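/*
 * Allocate the kmem_cache instances used below for se_cmd, se_session,
 * TMR, UA, PR registration and ALUA group descriptors.  Intended to be
 * called once during target core initialization, paired with
 * release_se_kmem_caches() on teardown.
 */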
int init_se_kmem_caches(void)
{
	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!se_cmd_cache) {
		pr_err("kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!se_tmr_req_cache) {
		pr_err("kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

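/*
 * Initialize a se_queue_obj: the command queue that the per-device
 * processing thread consumes via transport_get_cmd_from_queue() below.
 */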
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	return 0;
}

int transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins..
	 */
	ret = transport_subsystem_reqmods();
	if (ret < 0)
		return ret;

	sub_api_initialized = 1;
	return 0;
}

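/*
 * Allocate a se_session for a new fabric I_T nexus.  A minimal fabric
 * caller sketch (error handling elided, variable names illustrative):
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */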
struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session pointers.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;
		if (!dev)
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!atomic_read(&task->task_state_active))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
			cmd->se_tfo->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&cmd->t_task_cdbs_ex_left);
	}
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&cmd->t_transport_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release their
			 * internally allocated I/O reference and struct se_cmd now.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				cmd->se_tfo->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

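/*
 * Drop a command's association with its se_lun: clear any remaining device
 * task state, then remove the command from the LUN's lun_cmd_list if it is
 * still tracked there.
 */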
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);


check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&cmd->transport_lun_active)) {
		list_del(&cmd->se_lun_node);
		atomic_set(&cmd->transport_lun_active, 0);
#if 0
		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0);
}

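/*
 * Queue a se_cmd onto its device's se_queue_obj in the requested t_state
 * and wake the processing thread.  Commands flagged SCF_EMULATE_QUEUE_FULL
 * are requeued at the head of the list so QUEUE_FULL retries run first.
 */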
static void transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	INIT_LIST_HEAD(&cmd->se_queue_node);

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&cmd->t_transport_active, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	} else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	atomic_inc(&cmd->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	atomic_dec(&cmd->t_transport_queue_active);

	list_del(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *t;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!atomic_read(&cmd->t_transport_queue_active)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
		if (t == cmd) {
			atomic_dec(&cmd->t_transport_queue_active);
			atomic_dec(&qobj->queue_cnt);
			list_del(&cmd->se_queue_node);
			break;
		}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&cmd->t_transport_queue_active)) {
		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
			cmd->se_tfo->get_task_tag(cmd),
			atomic_read(&cmd->t_transport_queue_active));
	}
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		task->task_se_cmd->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);

/* transport_complete_task():
 *
 * Called from interrupt and non-interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
			cmd->t_task_cdb[0], dev);
#endif
	if (dev)
		atomic_inc(&dev->depth_left);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement cmd->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&cmd->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!atomic_dec_and_test(
				&cmd->t_task_cdbs_timeout_left)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&cmd->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		if (!success)
			cmd->t_tasks_failed = 1;

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (!success || cmd->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&cmd->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
			task->task_se_cmd->se_tfo->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/* transport_remove_task_from_execute_queue():
 *
 *
 */
void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (atomic_read(&task->task_execute_queue) == 0) {
		dump_stack();
		return;
	}

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {

		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();
		spin_unlock_irq(&dev->qf_cmd_lock);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");
		/*
		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
		 * has been added to head of queue
		 */
		transport_add_cmd_to_queue(cmd, cmd->t_state);

		spin_lock_irq(&dev->qf_cmd_lock);
	}
	spin_unlock_irq(&dev->qf_cmd_lock);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, " ");
}

/* transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd, *tcmd;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
				se_queue_node) {
		t_state = cmd->t_state;
		list_del(&cmd->se_queue_node);
		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
				flags);

		pr_err("Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			cmd->se_tfo->get_task_tag(cmd),
			cmd->se_tfo->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

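/*
 * Illustrative example (descriptor values assumed per spc3r23.pdf Section
 * 7.6.3.1): a page 0x83 designation descriptor beginning 0x02 0x01 0x00 0x08
 * encodes an ASCII code set (byte 0 & 0x0f), logical unit association
 * (byte 1 & 0x30), a T10 Vendor ID based designator type (byte 1 & 0x0f),
 * and an 8 byte identifier starting at offset 4, which is what the parsing
 * below consumes.
 */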
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			pr_debug("%c", wwn->vendor[i]);
		else
			pr_debug(" ");

	pr_debug(" Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			pr_debug("%c", wwn->model[i]);
		else
			pr_debug(" ");

	pr_debug(" Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			pr_debug("%c", wwn->revision[i]);
		else
			pr_debug(" ");

	pr_debug("\n");

	device_type = dev->transport->get_device_type(dev);
	pr_debug(" Type:   %s ", scsi_device_type(device_type));
	pr_debug(" ANSI SCSI revision: %02x\n",
			dev->transport->get_device_rev(dev));
}

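/*
 * Allocate and initialize a struct se_device for a backend subsystem plugin:
 * set up its queue object, lists and locks, SAM Task Attribute emulation,
 * reservations and ALUA support, then start the per-device processing
 * kthread.  Returns the new se_device, or NULL on failure.
 */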
struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	atomic_set(&dev->active_cmds, 0);
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->ordered_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->ordered_cmd_lock);
	spin_lock_init(&dev->state_task_lock);
	spin_lock_init(&dev->dev_alua_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->dev_status_thr_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);

	dev->queue_depth = dev_limits->queue_depth;
	atomic_set(&dev->depth_left, dev->queue_depth);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/* transport_generic_prepare_cdb():
 *
 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 * Because we map iSCSI LUNs to SCSI Target IDs, leaving a non-zero LUN
 * encoded in the CDB would confuse the backend devices and HBAs, so it
 * is cleared here for most opcodes.
 */
1539 | static inline void transport_generic_prepare_cdb( | |
1540 | unsigned char *cdb) | |
1541 | { | |
1542 | switch (cdb[0]) { | |
1543 | case READ_10: /* SBC - RDProtect */ | |
1544 | case READ_12: /* SBC - RDProtect */ | |
1545 | case READ_16: /* SBC - RDProtect */ | |
1546 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1547 | case VERIFY: /* SBC - VRProtect */ | |
1548 | case VERIFY_16: /* SBC - VRProtect */ | |
1549 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1550 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1551 | break; | |
1552 | default: | |
1553 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1554 | break; | |
1555 | } | |
1556 | } | |
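/*
 * Illustrative sketch (editor's example, not part of the driver): for a
 * legacy initiator that encodes LUN 2 in bits 7-5 of CDB byte 1, the
 * default case above strips the LUN while preserving the low bits:
 *
 *	cdb[1] == 0x45		(binary 010 00101: LUN 2, low bits 0x05)
 *	cdb[1] &= 0x1f		->  0x05, logical unit number cleared
 */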
1557 | ||
1558 | static struct se_task * | |
1559 | transport_generic_get_task(struct se_cmd *cmd, | |
1560 | enum dma_data_direction data_direction) | |
1561 | { | |
1562 | struct se_task *task; | |
5951146d | 1563 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 1564 | |
6708bb27 | 1565 | task = dev->transport->alloc_task(cmd->t_task_cdb); |
c66ac9db | 1566 | if (!task) { |
6708bb27 | 1567 | pr_err("Unable to allocate struct se_task\n"); |
c66ac9db NB |
1568 | return NULL; |
1569 | } | |
1570 | ||
1571 | INIT_LIST_HEAD(&task->t_list); | |
1572 | INIT_LIST_HEAD(&task->t_execute_list); | |
1573 | INIT_LIST_HEAD(&task->t_state_list); | |
1574 | init_completion(&task->task_stop_comp); | |
c66ac9db NB |
1575 | task->task_se_cmd = cmd; |
1576 | task->se_dev = dev; | |
1577 | task->task_data_direction = data_direction; | |
1578 | ||
c66ac9db NB |
1579 | return task; |
1580 | } | |
1581 | ||
1582 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1583 | ||
c66ac9db NB |
1584 | /* |
1585 | * Used by fabric modules containing a local struct se_cmd within their | |
1586 | * fabric dependent per I/O descriptor. | |
1587 | */ | |
1588 | void transport_init_se_cmd( | |
1589 | struct se_cmd *cmd, | |
1590 | struct target_core_fabric_ops *tfo, | |
1591 | struct se_session *se_sess, | |
1592 | u32 data_length, | |
1593 | int data_direction, | |
1594 | int task_attr, | |
1595 | unsigned char *sense_buffer) | |
1596 | { | |
5951146d AG |
1597 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1598 | INIT_LIST_HEAD(&cmd->se_delayed_node); | |
1599 | INIT_LIST_HEAD(&cmd->se_ordered_node); | |
07bde79a | 1600 | INIT_LIST_HEAD(&cmd->se_qf_node); |
c66ac9db | 1601 | |
a1d8b49a AG |
1602 | INIT_LIST_HEAD(&cmd->t_task_list); |
1603 | init_completion(&cmd->transport_lun_fe_stop_comp); | |
1604 | init_completion(&cmd->transport_lun_stop_comp); | |
1605 | init_completion(&cmd->t_transport_stop_comp); | |
1606 | spin_lock_init(&cmd->t_state_lock); | |
1607 | atomic_set(&cmd->transport_dev_active, 1); | |
c66ac9db NB |
1608 | |
1609 | cmd->se_tfo = tfo; | |
1610 | cmd->se_sess = se_sess; | |
1611 | cmd->data_length = data_length; | |
1612 | cmd->data_direction = data_direction; | |
1613 | cmd->sam_task_attr = task_attr; | |
1614 | cmd->sense_buffer = sense_buffer; | |
1615 | } | |
1616 | EXPORT_SYMBOL(transport_init_se_cmd); | |
1617 | ||
1618 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1619 | { | |
1620 | /* | |
1621 | * Check if SAM Task Attribute emulation is enabled for this | |
1622 | * struct se_device storage object | |
1623 | */ | |
5951146d | 1624 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1625 | return 0; |
1626 | ||
e66ecd50 | 1627 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
6708bb27 | 1628 | pr_debug("SAM Task Attribute ACA" |
c66ac9db | 1629 | " emulation is not supported\n"); |
e3d6f909 | 1630 | return -EINVAL; |
c66ac9db NB |
1631 | } |
1632 | /* | |
1633 | * Used to determine when ORDERED commands should go from | |
1634 | * Dormant to Active status. | |
1635 | */ | |
5951146d | 1636 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
c66ac9db | 1637 | smp_mb__after_atomic_inc(); |
6708bb27 | 1638 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
c66ac9db | 1639 | cmd->se_ordered_id, cmd->sam_task_attr, |
6708bb27 | 1640 | cmd->se_dev->transport->name); |
c66ac9db NB |
1641 | return 0; |
1642 | } | |
1643 | ||
1644 | void transport_free_se_cmd( | |
1645 | struct se_cmd *se_cmd) | |
1646 | { | |
1647 | if (se_cmd->se_tmr_req) | |
1648 | core_tmr_release_req(se_cmd->se_tmr_req); | |
1649 | /* | |
1650 | * Check and free any extended CDB buffer that was allocated | |
1651 | */ | |
a1d8b49a AG |
1652 | if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) |
1653 | kfree(se_cmd->t_task_cdb); | |
c66ac9db NB |
1654 | } |
1655 | EXPORT_SYMBOL(transport_free_se_cmd); | |
1656 | ||
1657 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | |
1658 | ||
1659 | /* transport_generic_allocate_tasks(): | |
1660 | * | |
1661 | * Called from fabric RX Thread. | |
1662 | */ | |
1663 | int transport_generic_allocate_tasks( | |
1664 | struct se_cmd *cmd, | |
1665 | unsigned char *cdb) | |
1666 | { | |
1667 | int ret; | |
1668 | ||
1669 | transport_generic_prepare_cdb(cdb); | |
1670 | ||
1671 | /* | |
1672 | * This is needed for early exceptions. | |
1673 | */ | |
1674 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
1675 | ||
c66ac9db NB |
1676 | /* |
1677 | * Ensure that the received CDB is less than the max (252 + 8) bytes | |
1678 | * for VARIABLE_LENGTH_CMD | |
1679 | */ | |
1680 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
6708bb27 | 1681 | pr_err("Received SCSI CDB with command_size: %d that" |
c66ac9db NB |
1682 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1683 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
e3d6f909 | 1684 | return -EINVAL; |
c66ac9db NB |
1685 | } |
1686 | /* | |
1687 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1688 | * allocate the additional extended CDB buffer now.. Otherwise | |
1689 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1690 | */ | |
a1d8b49a AG |
1691 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1692 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
c66ac9db | 1693 | GFP_KERNEL); |
6708bb27 AG |
1694 | if (!cmd->t_task_cdb) { |
1695 | pr_err("Unable to allocate cmd->t_task_cdb" | |
a1d8b49a | 1696 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
c66ac9db | 1697 | scsi_command_size(cdb), |
a1d8b49a | 1698 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
e3d6f909 | 1699 | return -ENOMEM; |
c66ac9db NB |
1700 | } |
1701 | } else | |
a1d8b49a | 1702 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
c66ac9db | 1703 | /* |
a1d8b49a | 1704 | * Copy the original CDB into cmd->t_task_cdb. | 
c66ac9db | 1705 | */ |
a1d8b49a | 1706 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
c66ac9db NB |
1707 | /* |
1708 | * Setup the received CDB based on SCSI defined opcodes and | |
1709 | * perform unit attention, persistent reservations and ALUA | |
a1d8b49a | 1710 | * checks for virtual device backends. The cmd->t_task_cdb |
c66ac9db NB |
1711 | * pointer is expected to be setup before we reach this point. |
1712 | */ | |
1713 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1714 | if (ret < 0) | |
1715 | return ret; | |
1716 | /* | |
1717 | * Check for SAM Task Attribute Emulation | |
1718 | */ | |
1719 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1720 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1721 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 1722 | return -EINVAL; |
c66ac9db NB |
1723 | } |
1724 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1725 | if (cmd->se_lun->lun_sep) | |
1726 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1727 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1728 | return 0; | |
1729 | } | |
1730 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
1731 | ||
1732 | /* | |
1733 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | |
1734 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | 
1735 | */ | |
1736 | int transport_generic_handle_cdb( | |
1737 | struct se_cmd *cmd) | |
1738 | { | |
e3d6f909 | 1739 | if (!cmd->se_lun) { |
c66ac9db | 1740 | dump_stack(); |
6708bb27 | 1741 | pr_err("cmd->se_lun is NULL\n"); |
e3d6f909 | 1742 | return -EINVAL; |
c66ac9db | 1743 | } |
695434e1 | 1744 | |
c66ac9db NB |
1745 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); |
1746 | return 0; | |
1747 | } | |
1748 | EXPORT_SYMBOL(transport_generic_handle_cdb); | |
1749 | ||
dd8ae59d NB |
1750 | static void transport_generic_request_failure(struct se_cmd *, |
1751 | struct se_device *, int, int); | |
695434e1 NB |
1752 | /* |
1753 | * Used by fabric module frontends to queue tasks directly. | |
1754 | * May only be used from process context | 
1755 | */ | |
1756 | int transport_handle_cdb_direct( | |
1757 | struct se_cmd *cmd) | |
1758 | { | |
dd8ae59d NB |
1759 | int ret; |
1760 | ||
695434e1 NB |
1761 | if (!cmd->se_lun) { |
1762 | dump_stack(); | |
6708bb27 | 1763 | pr_err("cmd->se_lun is NULL\n"); |
695434e1 NB |
1764 | return -EINVAL; |
1765 | } | |
1766 | if (in_interrupt()) { | |
1767 | dump_stack(); | |
6708bb27 | 1768 | pr_err("transport_generic_handle_cdb cannot be called" |
695434e1 NB |
1769 | " from interrupt context\n"); |
1770 | return -EINVAL; | |
1771 | } | |
dd8ae59d NB |
1772 | /* |
1773 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | |
1774 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | |
1775 | * in existing usage to ensure that outstanding descriptors are handled | |
1776 | * correctly during shutdown via transport_generic_wait_for_tasks() | |
1777 | * | |
1778 | * Also, we don't take cmd->t_state_lock here as we only expect | |
1779 | * this to be called for initial descriptor submission. | |
1780 | */ | |
1781 | cmd->t_state = TRANSPORT_NEW_CMD; | |
1782 | atomic_set(&cmd->t_transport_active, 1); | |
1783 | /* | |
1784 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | |
1785 | * so follow TRANSPORT_NEW_CMD processing thread context usage | |
1786 | * and call transport_generic_request_failure() if necessary. | 
1787 | */ | |
1788 | ret = transport_generic_new_cmd(cmd); | |
1789 | if (ret == -EAGAIN) | |
1790 | return 0; | |
1791 | else if (ret < 0) { | |
1792 | cmd->transport_error_status = ret; | |
1793 | transport_generic_request_failure(cmd, NULL, 0, | |
1794 | (cmd->data_direction != DMA_TO_DEVICE)); | |
1795 | } | |
1796 | return 0; | |
695434e1 NB |
1797 | } |
1798 | EXPORT_SYMBOL(transport_handle_cdb_direct); | |
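/*
 * Hypothetical usage sketch (editor's example, not taken from a fabric
 * module): a fabric front-end running in process context would typically
 * pair the exported entry points from this file roughly as follows, once
 * cmd->se_lun has been set up.  Names such as my_cmd, my_tfo, my_sess,
 * data_len, cdb and sense_buf are placeholders, not part of this API:
 *
 *	transport_init_se_cmd(&my_cmd->se_cmd, &my_tfo, my_sess, data_len,
 *			DMA_FROM_DEVICE, MSG_SIMPLE_TAG, sense_buf);
 *	if (transport_generic_allocate_tasks(&my_cmd->se_cmd, cdb) < 0)
 *		return;		(error handling is fabric specific)
 *	transport_handle_cdb_direct(&my_cmd->se_cmd);
 */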
1799 | ||
c66ac9db NB |
1800 | /* |
1801 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1802 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1803 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1804 | */ | |
1805 | int transport_generic_handle_cdb_map( | |
1806 | struct se_cmd *cmd) | |
1807 | { | |
e3d6f909 | 1808 | if (!cmd->se_lun) { |
c66ac9db | 1809 | dump_stack(); |
6708bb27 | 1810 | pr_err("cmd->se_lun is NULL\n"); |
e3d6f909 | 1811 | return -EINVAL; |
c66ac9db NB |
1812 | } |
1813 | ||
1814 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
1815 | return 0; | |
1816 | } | |
1817 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1818 | ||
1819 | /* transport_generic_handle_data(): | |
1820 | * | |
1821 | * | |
1822 | */ | |
1823 | int transport_generic_handle_data( | |
1824 | struct se_cmd *cmd) | |
1825 | { | |
1826 | /* | |
1827 | * For the software fabric case, then we assume the nexus is being | |
1828 | * failed/shutdown when signals are pending from the kthread context | |
1829 | * caller, so we return a failure. For the HW target mode case running | |
1830 | * in interrupt code, the signal_pending() check is skipped. | |
1831 | */ | |
1832 | if (!in_interrupt() && signal_pending(current)) | |
e3d6f909 | 1833 | return -EPERM; |
c66ac9db NB |
1834 | /* |
1835 | * If the received CDB has already been ABORTED by the generic | 
1836 | * target engine, we now call transport_check_aborted_status() | 
1837 | * to queue any delayed TASK_ABORTED status for the received CDB to the | 
25985edc | 1838 | * fabric module as we are expecting no further incoming DATA OUT |
c66ac9db NB |
1839 | * sequences at this point. |
1840 | */ | |
1841 | if (transport_check_aborted_status(cmd, 1) != 0) | |
1842 | return 0; | |
1843 | ||
1844 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
1845 | return 0; | |
1846 | } | |
1847 | EXPORT_SYMBOL(transport_generic_handle_data); | |
1848 | ||
1849 | /* transport_generic_handle_tmr(): | |
1850 | * | |
1851 | * | |
1852 | */ | |
1853 | int transport_generic_handle_tmr( | |
1854 | struct se_cmd *cmd) | |
1855 | { | |
1856 | /* | |
1857 | * This is needed for early exceptions. | |
1858 | */ | |
1859 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
c66ac9db NB |
1860 | |
1861 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | |
1862 | return 0; | |
1863 | } | |
1864 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
1865 | ||
f4366772 NB |
1866 | void transport_generic_free_cmd_intr( |
1867 | struct se_cmd *cmd) | |
1868 | { | |
1869 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); | |
1870 | } | |
1871 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); | |
1872 | ||
c66ac9db NB |
1873 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1874 | { | |
1875 | struct se_task *task, *task_tmp; | |
1876 | unsigned long flags; | |
1877 | int ret = 0; | |
1878 | ||
6708bb27 | 1879 | pr_debug("ITT[0x%08x] - Stopping tasks\n", |
e3d6f909 | 1880 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
1881 | |
1882 | /* | |
1883 | * No tasks remain in the execution queue | |
1884 | */ | |
a1d8b49a | 1885 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 1886 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 1887 | &cmd->t_task_list, t_list) { |
6708bb27 | 1888 | pr_debug("task_no[%d] - Processing task %p\n", |
c66ac9db NB |
1889 | task->task_no, task); |
1890 | /* | |
1891 | * If the struct se_task has not been sent and is not active, | |
1892 | * remove the struct se_task from the execution queue. | |
1893 | */ | |
1894 | if (!atomic_read(&task->task_sent) && | |
1895 | !atomic_read(&task->task_active)) { | |
a1d8b49a | 1896 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1897 | flags); |
1898 | transport_remove_task_from_execute_queue(task, | |
1899 | task->se_dev); | |
1900 | ||
6708bb27 | 1901 | pr_debug("task_no[%d] - Removed from execute queue\n", |
c66ac9db | 1902 | task->task_no); |
a1d8b49a | 1903 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
1904 | continue; |
1905 | } | |
1906 | ||
1907 | /* | |
1908 | * If the struct se_task is active, sleep until it is returned | |
1909 | * from the plugin. | |
1910 | */ | |
1911 | if (atomic_read(&task->task_active)) { | |
1912 | atomic_set(&task->task_stop, 1); | |
a1d8b49a | 1913 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1914 | flags); |
1915 | ||
6708bb27 | 1916 | pr_debug("task_no[%d] - Waiting to complete\n", |
c66ac9db NB |
1917 | task->task_no); |
1918 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 1919 | pr_debug("task_no[%d] - Stopped successfully\n", |
c66ac9db NB |
1920 | task->task_no); |
1921 | ||
a1d8b49a AG |
1922 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1923 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
1924 | |
1925 | atomic_set(&task->task_active, 0); | |
1926 | atomic_set(&task->task_stop, 0); | |
1927 | } else { | |
6708bb27 | 1928 | pr_debug("task_no[%d] - Did nothing\n", task->task_no); |
c66ac9db NB |
1929 | ret++; |
1930 | } | |
1931 | ||
1932 | __transport_stop_task_timer(task, &flags); | |
1933 | } | |
a1d8b49a | 1934 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1935 | |
1936 | return ret; | |
1937 | } | |
1938 | ||
c66ac9db NB |
1939 | /* |
1940 | * Handle SAM-esque emulation for generic transport request failures. | |
1941 | */ | |
1942 | static void transport_generic_request_failure( | |
1943 | struct se_cmd *cmd, | |
1944 | struct se_device *dev, | |
1945 | int complete, | |
1946 | int sc) | |
1947 | { | |
07bde79a NB |
1948 | int ret = 0; |
1949 | ||
6708bb27 | 1950 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
e3d6f909 | 1951 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 1952 | cmd->t_task_cdb[0]); |
6708bb27 | 1953 | pr_debug("-----[ i_state: %d t_state/def_t_state:" |
c66ac9db | 1954 | " %d/%d transport_error_status: %d\n", |
e3d6f909 | 1955 | cmd->se_tfo->get_cmd_state(cmd), |
c66ac9db NB |
1956 | cmd->t_state, cmd->deferred_t_state, |
1957 | cmd->transport_error_status); | |
6708bb27 | 1958 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
c66ac9db NB |
1959 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
1960 | " t_transport_active: %d t_transport_stop: %d" | |
6708bb27 | 1961 | " t_transport_sent: %d\n", cmd->t_task_list_num, |
a1d8b49a AG |
1962 | atomic_read(&cmd->t_task_cdbs_left), |
1963 | atomic_read(&cmd->t_task_cdbs_sent), | |
1964 | atomic_read(&cmd->t_task_cdbs_ex_left), | |
1965 | atomic_read(&cmd->t_transport_active), | |
1966 | atomic_read(&cmd->t_transport_stop), | |
1967 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
1968 | |
1969 | transport_stop_all_task_timers(cmd); | |
1970 | ||
1971 | if (dev) | |
e3d6f909 | 1972 | atomic_inc(&dev->depth_left); |
c66ac9db NB |
1973 | /* |
1974 | * For SAM Task Attribute emulation for failed struct se_cmd | |
1975 | */ | |
1976 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
1977 | transport_complete_task_attr(cmd); | |
1978 | ||
1979 | if (complete) { | |
1980 | transport_direct_request_timeout(cmd); | |
1981 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
1982 | } | |
1983 | ||
1984 | switch (cmd->transport_error_status) { | |
1985 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
1986 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1987 | break; | |
1988 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
1989 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
1990 | break; | |
1991 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
1992 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1993 | break; | |
1994 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
1995 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
1996 | break; | |
1997 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
1998 | if (!sc) | |
1999 | transport_new_cmd_failure(cmd); | |
2000 | /* | |
2001 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
2002 | * we force this session to fall back to session | |
2003 | * recovery. | |
2004 | */ | |
e3d6f909 AG |
2005 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
2006 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); | |
c66ac9db NB |
2007 | |
2008 | goto check_stop; | |
2009 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
2010 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
2011 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
2012 | break; | |
2013 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
2014 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
2015 | break; | |
2016 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
2017 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
2018 | break; | |
2019 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
2020 | /* | |
2021 | * No SENSE Data payload for this case, set SCSI Status | |
2022 | * and queue the response to $FABRIC_MOD. | |
2023 | * | |
2024 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
2025 | */ | |
2026 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2027 | /* | |
2028 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2029 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2030 | * CONFLICT STATUS. | |
2031 | * | |
2032 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2033 | */ | |
e3d6f909 AG |
2034 | if (cmd->se_sess && |
2035 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2036 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2037 | cmd->orig_fe_lun, 0x2C, |
2038 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
2039 | ||
07bde79a NB |
2040 | ret = cmd->se_tfo->queue_status(cmd); |
2041 | if (ret == -EAGAIN) | |
2042 | goto queue_full; | |
c66ac9db NB |
2043 | goto check_stop; |
2044 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
2045 | /* | |
2046 | * struct se_cmd->scsi_sense_reason already set | |
2047 | */ | |
2048 | break; | |
2049 | default: | |
6708bb27 | 2050 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
a1d8b49a | 2051 | cmd->t_task_cdb[0], |
c66ac9db NB |
2052 | cmd->transport_error_status); |
2053 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2054 | break; | |
2055 | } | |
16ab8e60 NB |
2056 | /* |
2057 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | |
2058 | * make the call to transport_send_check_condition_and_sense() | |
2059 | * directly. Otherwise expect the fabric to make the call to | |
2060 | * transport_send_check_condition_and_sense() after handling | |
2061 | * possible unsolicited write data payloads. | 
2062 | */ | |
2063 | if (!sc && !cmd->se_tfo->new_cmd_map) | |
c66ac9db | 2064 | transport_new_cmd_failure(cmd); |
07bde79a NB |
2065 | else { |
2066 | ret = transport_send_check_condition_and_sense(cmd, | |
2067 | cmd->scsi_sense_reason, 0); | |
2068 | if (ret == -EAGAIN) | |
2069 | goto queue_full; | |
2070 | } | |
2071 | ||
c66ac9db NB |
2072 | check_stop: |
2073 | transport_lun_remove_cmd(cmd); | |
6708bb27 | 2074 | if (!transport_cmd_check_stop_to_fabric(cmd)) |
c66ac9db | 2075 | ; |
07bde79a NB |
2076 | return; |
2077 | ||
2078 | queue_full: | |
2079 | cmd->t_state = TRANSPORT_COMPLETE_OK; | |
2080 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
2081 | } |
2082 | ||
2083 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2084 | { | |
2085 | unsigned long flags; | |
2086 | ||
a1d8b49a | 2087 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 2088 | if (!atomic_read(&cmd->t_transport_timeout)) { |
a1d8b49a | 2089 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2090 | return; |
2091 | } | |
a1d8b49a AG |
2092 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { |
2093 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2094 | return; |
2095 | } | |
2096 | ||
a1d8b49a AG |
2097 | atomic_sub(atomic_read(&cmd->t_transport_timeout), |
2098 | &cmd->t_se_count); | |
2099 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2100 | } |
2101 | ||
2102 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2103 | { | |
2104 | unsigned long flags; | |
2105 | ||
2106 | /* | |
a1d8b49a | 2107 | * Reset cmd->t_se_count to allow transport_generic_remove() |
c66ac9db NB |
2108 | * to allow last call to free memory resources. |
2109 | */ | |
a1d8b49a AG |
2110 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2111 | if (atomic_read(&cmd->t_transport_timeout) > 1) { | |
2112 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); | |
c66ac9db | 2113 | |
a1d8b49a | 2114 | atomic_sub(tmp, &cmd->t_se_count); |
c66ac9db | 2115 | } |
a1d8b49a | 2116 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2117 | |
35462975 | 2118 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
2119 | } |
2120 | ||
c66ac9db NB |
2121 | static inline u32 transport_lba_21(unsigned char *cdb) |
2122 | { | |
2123 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2124 | } | |
2125 | ||
2126 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2127 | { | |
2128 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2129 | } | |
2130 | ||
2131 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2132 | { | |
2133 | unsigned int __v1, __v2; | |
2134 | ||
2135 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2136 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2137 | ||
2138 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2139 | } | |
2140 | ||
2141 | /* | |
2142 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2143 | */ | |
2144 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2145 | { | |
2146 | unsigned int __v1, __v2; | |
2147 | ||
2148 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2149 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2150 | ||
2151 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2152 | } | |
2153 | ||
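/*
 * Worked example for the LBA helpers above (editor's sketch): a READ_10
 * CDB with big-endian LBA bytes cdb[2..5] = { 0x00, 0x12, 0x34, 0x56 }
 * yields transport_lba_32() == 0x00123456.  The 64-bit variants extend
 * the same shift-and-or pattern across eight CDB bytes (bytes 2-9, or
 * bytes 12-19 for the 32-byte VARIABLE_LENGTH_CDB form).
 */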
2154 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2155 | { | |
2156 | unsigned long flags; | |
2157 | ||
a1d8b49a | 2158 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db | 2159 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
a1d8b49a | 2160 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2161 | } |
2162 | ||
2163 | /* | |
2164 | * Called from interrupt context. | |
2165 | */ | |
2166 | static void transport_task_timeout_handler(unsigned long data) | |
2167 | { | |
2168 | struct se_task *task = (struct se_task *)data; | |
e3d6f909 | 2169 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
2170 | unsigned long flags; |
2171 | ||
6708bb27 | 2172 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
c66ac9db | 2173 | |
a1d8b49a | 2174 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2175 | if (task->task_flags & TF_STOP) { |
a1d8b49a | 2176 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2177 | return; |
2178 | } | |
2179 | task->task_flags &= ~TF_RUNNING; | |
2180 | ||
2181 | /* | |
2182 | * Determine if transport_complete_task() has already been called. | |
2183 | */ | |
6708bb27 AG |
2184 | if (!atomic_read(&task->task_active)) { |
2185 | pr_debug("transport task: %p cmd: %p timeout task_active" | |
c66ac9db | 2186 | " == 0\n", task, cmd); |
a1d8b49a | 2187 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2188 | return; |
2189 | } | |
2190 | ||
a1d8b49a AG |
2191 | atomic_inc(&cmd->t_se_count); |
2192 | atomic_inc(&cmd->t_transport_timeout); | |
2193 | cmd->t_tasks_failed = 1; | |
c66ac9db NB |
2194 | |
2195 | atomic_set(&task->task_timeout, 1); | |
2196 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2197 | task->task_scsi_status = 1; | |
2198 | ||
2199 | if (atomic_read(&task->task_stop)) { | |
6708bb27 | 2200 | pr_debug("transport task: %p cmd: %p timeout task_stop" |
c66ac9db | 2201 | " == 1\n", task, cmd); |
a1d8b49a | 2202 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2203 | complete(&task->task_stop_comp); |
2204 | return; | |
2205 | } | |
2206 | ||
6708bb27 AG |
2207 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
2208 | pr_debug("transport task: %p cmd: %p timeout non zero" | |
c66ac9db | 2209 | " t_task_cdbs_left\n", task, cmd); |
a1d8b49a | 2210 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2211 | return; |
2212 | } | |
6708bb27 | 2213 | pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
c66ac9db NB |
2214 | task, cmd); |
2215 | ||
2216 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
a1d8b49a | 2217 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2218 | |
2219 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2220 | } | |
2221 | ||
2222 | /* | |
a1d8b49a | 2223 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
2224 | */ |
2225 | static void transport_start_task_timer(struct se_task *task) | |
2226 | { | |
2227 | struct se_device *dev = task->se_dev; | |
2228 | int timeout; | |
2229 | ||
2230 | if (task->task_flags & TF_RUNNING) | |
2231 | return; | |
2232 | /* | |
2233 | * If the task_timeout is disabled, exit now. | |
2234 | */ | |
e3d6f909 | 2235 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
6708bb27 | 2236 | if (!timeout) |
c66ac9db NB |
2237 | return; |
2238 | ||
2239 | init_timer(&task->task_timer); | |
2240 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2241 | task->task_timer.data = (unsigned long) task; | |
2242 | task->task_timer.function = transport_task_timeout_handler; | |
2243 | ||
2244 | task->task_flags |= TF_RUNNING; | |
2245 | add_timer(&task->task_timer); | |
2246 | #if 0 | |
6708bb27 | 2247 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" |
c66ac9db NB |
2248 | " %d\n", task->task_se_cmd, task, timeout); |
2249 | #endif | |
2250 | } | |
2251 | ||
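/*
 * Expiry arithmetic sketch (editor's note): with a configured
 * se_dev_attrib.task_timeout of e.g. 10 seconds, the timer armed above
 * fires roughly 10 * HZ jiffies after the task is started and runs
 * transport_task_timeout_handler() in interrupt context; a task_timeout
 * of zero leaves the task untimed.
 */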
2252 | /* | |
a1d8b49a | 2253 | * Called with spin_lock_irq(&cmd->t_state_lock) held. |
c66ac9db NB |
2254 | */ |
2255 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2256 | { | |
e3d6f909 | 2257 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db | 2258 | |
6708bb27 | 2259 | if (!(task->task_flags & TF_RUNNING)) | 
c66ac9db NB |
2260 | return; |
2261 | ||
2262 | task->task_flags |= TF_STOP; | |
a1d8b49a | 2263 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2264 | |
2265 | del_timer_sync(&task->task_timer); | |
2266 | ||
a1d8b49a | 2267 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2268 | task->task_flags &= ~TF_RUNNING; |
2269 | task->task_flags &= ~TF_STOP; | |
2270 | } | |
2271 | ||
2272 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2273 | { | |
2274 | struct se_task *task = NULL, *task_tmp; | |
2275 | unsigned long flags; | |
2276 | ||
a1d8b49a | 2277 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2278 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 2279 | &cmd->t_task_list, t_list) |
c66ac9db | 2280 | __transport_stop_task_timer(task, &flags); |
a1d8b49a | 2281 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2282 | } |
2283 | ||
2284 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2285 | { | |
2286 | if (dev->dev_tcq_window_closed++ < | |
2287 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2288 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2289 | } else | |
2290 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2291 | ||
e3d6f909 | 2292 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
2293 | return 0; |
2294 | } | |
2295 | ||
2296 | /* | |
2297 | * Called from Fabric Module context from transport_execute_tasks() | |
2298 | * | |
2299 | * The return of this function determines if the tasks from struct se_cmd | 
2300 | * get added to the execution queue in transport_execute_tasks(), | |
2301 | * or are added to the delayed or ordered lists here. | |
2302 | */ | |
2303 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2304 | { | |
5951146d | 2305 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
2306 | return 1; |
2307 | /* | |
25985edc | 2308 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
c66ac9db NB |
2309 | * to allow the tasks built for the passed struct se_cmd to be added to the front of the list. | 
2310 | */ | |
e66ecd50 | 2311 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
5951146d | 2312 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
c66ac9db | 2313 | smp_mb__after_atomic_inc(); |
6708bb27 | 2314 | pr_debug("Added HEAD_OF_QUEUE for CDB:" |
c66ac9db | 2315 | " 0x%02x, se_ordered_id: %u\n", |
6708bb27 | 2316 | cmd->t_task_cdb[0], |
c66ac9db NB |
2317 | cmd->se_ordered_id); |
2318 | return 1; | |
e66ecd50 | 2319 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
5951146d AG |
2320 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2321 | list_add_tail(&cmd->se_ordered_node, | |
2322 | &cmd->se_dev->ordered_cmd_list); | |
2323 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); | |
c66ac9db | 2324 | |
5951146d | 2325 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
c66ac9db NB |
2326 | smp_mb__after_atomic_inc(); |
2327 | ||
6708bb27 | 2328 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" |
c66ac9db | 2329 | " list, se_ordered_id: %u\n", |
a1d8b49a | 2330 | cmd->t_task_cdb[0], |
c66ac9db NB |
2331 | cmd->se_ordered_id); |
2332 | /* | |
2333 | * Add ORDERED command to tail of execution queue if | |
2334 | * no other older commands exist that need to be | |
2335 | * completed first. | |
2336 | */ | |
6708bb27 | 2337 | if (!atomic_read(&cmd->se_dev->simple_cmds)) |
c66ac9db NB |
2338 | return 1; |
2339 | } else { | |
2340 | /* | |
2341 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2342 | */ | |
5951146d | 2343 | atomic_inc(&cmd->se_dev->simple_cmds); |
c66ac9db NB |
2344 | smp_mb__after_atomic_inc(); |
2345 | } | |
2346 | /* | |
2347 | * Otherwise if one or more outstanding ORDERED task attribute exist, | |
2348 | * add the dormant task(s) built for the passed struct se_cmd to the | |
2349 | * execution queue and become in Active state for this struct se_device. | |
2350 | */ | |
5951146d | 2351 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
c66ac9db NB |
2352 | /* |
2353 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
25985edc | 2354 | * will be drained upon completion of HEAD_OF_QUEUE task. |
c66ac9db | 2355 | */ |
5951146d | 2356 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
c66ac9db | 2357 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
5951146d AG |
2358 | list_add_tail(&cmd->se_delayed_node, |
2359 | &cmd->se_dev->delayed_cmd_list); | |
2360 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | |
c66ac9db | 2361 | |
6708bb27 | 2362 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
c66ac9db | 2363 | " delayed CMD list, se_ordered_id: %u\n", |
a1d8b49a | 2364 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
c66ac9db NB |
2365 | cmd->se_ordered_id); |
2366 | /* | |
2367 | * Return zero to let transport_execute_tasks() know | |
2368 | * not to add the delayed tasks to the execution list. | |
2369 | */ | |
2370 | return 0; | |
2371 | } | |
2372 | /* | |
2373 | * Otherwise, no ORDERED task attributes exist.. | |
2374 | */ | |
2375 | return 1; | |
2376 | } | |
2377 | ||
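/*
 * Summary of the return value above (editor's sketch, derived from the
 * code paths in transport_execute_task_attr()):
 *
 *	MSG_HEAD_TAG	-> 1 (head-of-queue handling applied when the
 *			      tasks are later added to the execute queue)
 *	MSG_ORDERED_TAG	-> 1 only when no simple commands are outstanding
 *	SIMPLE/untagged	-> 1 unless an ORDERED sync count is outstanding
 *	otherwise	-> 0, command parked on se_dev->delayed_cmd_list
 */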
2378 | /* | |
2379 | * Called from fabric module context in transport_generic_new_cmd() and | |
2380 | * transport_generic_process_write() | |
2381 | */ | |
2382 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2383 | { | |
2384 | int add_tasks; | |
2385 | ||
db1620a2 CH |
2386 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2387 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2388 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2389 | return 0; | |
c66ac9db | 2390 | } |
db1620a2 | 2391 | |
c66ac9db NB |
2392 | /* |
2393 | * Call transport_cmd_check_stop() to see if a fabric exception | |
25985edc | 2394 | * has occurred that prevents execution. |
c66ac9db | 2395 | */ |
6708bb27 | 2396 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { |
c66ac9db NB |
2397 | /* |
2398 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2399 | * attribute for the tasks of the received struct se_cmd CDB | |
2400 | */ | |
2401 | add_tasks = transport_execute_task_attr(cmd); | |
e3d6f909 | 2402 | if (!add_tasks) |
c66ac9db NB |
2403 | goto execute_tasks; |
2404 | /* | |
2405 | * This calls transport_add_tasks_from_cmd() to handle | |
2406 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2407 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2408 | * transport_add_task_check_sam_attr(). | |
2409 | */ | |
2410 | transport_add_tasks_from_cmd(cmd); | |
2411 | } | |
2412 | /* | |
2413 | * Kick the execution queue for the cmd associated struct se_device | |
2414 | * storage object. | |
2415 | */ | |
2416 | execute_tasks: | |
5951146d | 2417 | __transport_execute_tasks(cmd->se_dev); |
c66ac9db NB |
2418 | return 0; |
2419 | } | |
2420 | ||
2421 | /* | |
2422 | * Called to check struct se_device tcq depth window, and once open pull struct se_task | |
2423 | * from struct se_device->execute_task_list and send it to the backend for execution. | 
2424 | * | |
2425 | * Called from transport_processing_thread() | |
2426 | */ | |
2427 | static int __transport_execute_tasks(struct se_device *dev) | |
2428 | { | |
2429 | int error; | |
2430 | struct se_cmd *cmd = NULL; | |
e3d6f909 | 2431 | struct se_task *task = NULL; |
c66ac9db NB |
2432 | unsigned long flags; |
2433 | ||
2434 | /* | |
2435 | * Check if there is enough room in the device and HBA queue to send | |
a1d8b49a | 2436 | * struct se_tasks to the selected transport. |
c66ac9db NB |
2437 | */ |
2438 | check_depth: | |
e3d6f909 | 2439 | if (!atomic_read(&dev->depth_left)) |
c66ac9db | 2440 | return transport_tcq_window_closed(dev); |
c66ac9db | 2441 | |
e3d6f909 | 2442 | dev->dev_tcq_window_closed = 0; |
c66ac9db | 2443 | |
e3d6f909 AG |
2444 | spin_lock_irq(&dev->execute_task_lock); |
2445 | if (list_empty(&dev->execute_task_list)) { | |
2446 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2447 | return 0; |
2448 | } | |
e3d6f909 AG |
2449 | task = list_first_entry(&dev->execute_task_list, |
2450 | struct se_task, t_execute_list); | |
2451 | list_del(&task->t_execute_list); | |
2452 | atomic_set(&task->task_execute_queue, 0); | |
2453 | atomic_dec(&dev->execute_tasks); | |
2454 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2455 | |
2456 | atomic_dec(&dev->depth_left); | |
c66ac9db | 2457 | |
e3d6f909 | 2458 | cmd = task->task_se_cmd; |
c66ac9db | 2459 | |
a1d8b49a | 2460 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
2461 | atomic_set(&task->task_active, 1); |
2462 | atomic_set(&task->task_sent, 1); | |
a1d8b49a | 2463 | atomic_inc(&cmd->t_task_cdbs_sent); |
c66ac9db | 2464 | |
a1d8b49a AG |
2465 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2466 | cmd->t_task_list_num) | |
c66ac9db NB |
2467 | atomic_set(&cmd->transport_sent, 1); |
2468 | ||
2469 | transport_start_task_timer(task); | |
a1d8b49a | 2470 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2471 | /* |
2472 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
e3d6f909 | 2473 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
c66ac9db NB |
2474 | * struct se_subsystem_api->do_task() caller below. |
2475 | */ | |
2476 | if (cmd->transport_emulate_cdb) { | |
2477 | error = cmd->transport_emulate_cdb(cmd); | |
2478 | if (error != 0) { | |
2479 | cmd->transport_error_status = error; | |
2480 | atomic_set(&task->task_active, 0); | |
2481 | atomic_set(&cmd->transport_sent, 0); | |
2482 | transport_stop_tasks_for_cmd(cmd); | |
2483 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2484 | goto check_depth; | |
2485 | } | |
2486 | /* | |
2487 | * Handle the successful completion for transport_emulate_cdb() | |
2488 | * for synchronous operation, following SCF_EMULATE_CDB_ASYNC | |
2489 | * Otherwise the caller is expected to complete the task with | |
2490 | * proper status. | |
2491 | */ | |
2492 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2493 | cmd->scsi_status = SAM_STAT_GOOD; | |
2494 | task->task_scsi_status = GOOD; | |
2495 | transport_complete_task(task, 1); | |
2496 | } | |
2497 | } else { | |
2498 | /* | |
2499 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2500 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2501 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2502 | * LUN emulation code. | |
2503 | * | |
2504 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2505 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2506 | * code handle the CDB emulation. | |
2507 | */ | |
e3d6f909 AG |
2508 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2509 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
c66ac9db NB |
2510 | error = transport_emulate_control_cdb(task); |
2511 | else | |
e3d6f909 | 2512 | error = dev->transport->do_task(task); |
c66ac9db NB |
2513 | |
2514 | if (error != 0) { | |
2515 | cmd->transport_error_status = error; | |
2516 | atomic_set(&task->task_active, 0); | |
2517 | atomic_set(&cmd->transport_sent, 0); | |
2518 | transport_stop_tasks_for_cmd(cmd); | |
2519 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2520 | } | |
2521 | } | |
2522 | ||
2523 | goto check_depth; | |
2524 | ||
2525 | return 0; | |
2526 | } | |
2527 | ||
2528 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2529 | { | |
2530 | unsigned long flags; | |
2531 | /* | |
2532 | * Any unsolicited data will get dumped for failed command inside of | |
2533 | * the fabric plugin | |
2534 | */ | |
a1d8b49a | 2535 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2536 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2537 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
a1d8b49a | 2538 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2539 | } |
2540 | ||
2541 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | |
2542 | ||
2543 | static inline u32 transport_get_sectors_6( | |
2544 | unsigned char *cdb, | |
2545 | struct se_cmd *cmd, | |
2546 | int *ret) | |
2547 | { | |
5951146d | 2548 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2549 | |
2550 | /* | |
2551 | * Assume TYPE_DISK for non struct se_device objects. | |
2552 | * Use 8-bit sector value. | |
2553 | */ | |
2554 | if (!dev) | |
2555 | goto type_disk; | |
2556 | ||
2557 | /* | |
2558 | * Use 24-bit allocation length for TYPE_TAPE. | |
2559 | */ | |
e3d6f909 | 2560 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2561 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2562 | ||
2563 | /* | |
2564 | * Everything else assume TYPE_DISK Sector CDB location. | |
2565 | * Use 8-bit sector value. | |
2566 | */ | |
2567 | type_disk: | |
2568 | return (u32)cdb[4]; | |
2569 | } | |
2570 | ||
2571 | static inline u32 transport_get_sectors_10( | |
2572 | unsigned char *cdb, | |
2573 | struct se_cmd *cmd, | |
2574 | int *ret) | |
2575 | { | |
5951146d | 2576 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2577 | |
2578 | /* | |
2579 | * Assume TYPE_DISK for non struct se_device objects. | |
2580 | * Use 16-bit sector value. | |
2581 | */ | |
2582 | if (!dev) | |
2583 | goto type_disk; | |
2584 | ||
2585 | /* | |
2586 | * XXX_10 is not defined in SSC, throw an exception | |
2587 | */ | |
e3d6f909 AG |
2588 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2589 | *ret = -EINVAL; | |
c66ac9db NB |
2590 | return 0; |
2591 | } | |
2592 | ||
2593 | /* | |
2594 | * Everything else assume TYPE_DISK Sector CDB location. | |
2595 | * Use 16-bit sector value. | |
2596 | */ | |
2597 | type_disk: | |
2598 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2599 | } | |
2600 | ||
2601 | static inline u32 transport_get_sectors_12( | |
2602 | unsigned char *cdb, | |
2603 | struct se_cmd *cmd, | |
2604 | int *ret) | |
2605 | { | |
5951146d | 2606 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2607 | |
2608 | /* | |
2609 | * Assume TYPE_DISK for non struct se_device objects. | |
2610 | * Use 32-bit sector value. | |
2611 | */ | |
2612 | if (!dev) | |
2613 | goto type_disk; | |
2614 | ||
2615 | /* | |
2616 | * XXX_12 is not defined in SSC, throw an exception | |
2617 | */ | |
e3d6f909 AG |
2618 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2619 | *ret = -EINVAL; | |
c66ac9db NB |
2620 | return 0; |
2621 | } | |
2622 | ||
2623 | /* | |
2624 | * Everything else assume TYPE_DISK Sector CDB location. | |
2625 | * Use 32-bit sector value. | |
2626 | */ | |
2627 | type_disk: | |
2628 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2629 | } | |
2630 | ||
2631 | static inline u32 transport_get_sectors_16( | |
2632 | unsigned char *cdb, | |
2633 | struct se_cmd *cmd, | |
2634 | int *ret) | |
2635 | { | |
5951146d | 2636 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2637 | |
2638 | /* | |
2639 | * Assume TYPE_DISK for non struct se_device objects. | |
2640 | * Use 32-bit sector value. | |
2641 | */ | |
2642 | if (!dev) | |
2643 | goto type_disk; | |
2644 | ||
2645 | /* | |
2646 | * Use 24-bit allocation length for TYPE_TAPE. | |
2647 | */ | |
e3d6f909 | 2648 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2649 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2650 | ||
2651 | type_disk: | |
2652 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2653 | (cdb[12] << 8) + cdb[13]; | |
2654 | } | |
2655 | ||
2656 | /* | |
2657 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2658 | */ | |
2659 | static inline u32 transport_get_sectors_32( | |
2660 | unsigned char *cdb, | |
2661 | struct se_cmd *cmd, | |
2662 | int *ret) | |
2663 | { | |
2664 | /* | |
2665 | * Assume TYPE_DISK for non struct se_device objects. | |
2666 | * Use 32-bit sector value. | |
2667 | */ | |
2668 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2669 | (cdb[30] << 8) + cdb[31]; | |
2670 | ||
2671 | } | |
2672 | ||
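/*
 * Worked example for the transport_get_sectors_*() helpers (editor's
 * sketch): a READ_10 CDB with cdb[7] = 0x00 and cdb[8] = 0x10 describes
 * a 16-sector transfer for TYPE_DISK; the _6/_12/_16/_32 variants only
 * differ in which CDB bytes carry the big-endian length and in the
 * TYPE_TAPE special cases handled above.
 */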
2673 | static inline u32 transport_get_size( | |
2674 | u32 sectors, | |
2675 | unsigned char *cdb, | |
2676 | struct se_cmd *cmd) | |
2677 | { | |
5951146d | 2678 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 2679 | |
e3d6f909 | 2680 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
c66ac9db | 2681 | if (cdb[1] & 1) { /* sectors */ |
e3d6f909 | 2682 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2683 | } else /* bytes */ |
2684 | return sectors; | |
2685 | } | |
2686 | #if 0 | |
6708bb27 | 2687 | pr_debug("Returning block_size: %u, sectors: %u == %u for" |
e3d6f909 AG |
2688 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2689 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, | |
2690 | dev->transport->name); | |
c66ac9db | 2691 | #endif |
e3d6f909 | 2692 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2693 | } |
2694 | ||
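/*
 * Worked example (editor's sketch): with a 512-byte block_size attribute
 * and a 16-sector transfer length, transport_get_size() reports
 * 512 * 16 = 8192 bytes; only TYPE_TAPE with the low bit of CDB byte 1
 * clear treats the length as a raw byte count instead.
 */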
c66ac9db NB |
2695 | static void transport_xor_callback(struct se_cmd *cmd) |
2696 | { | |
2697 | unsigned char *buf, *addr; | |
ec98f782 | 2698 | struct scatterlist *sg; |
c66ac9db NB |
2699 | unsigned int offset; |
2700 | int i; | |
ec98f782 | 2701 | int count; |
c66ac9db NB |
2702 | /* |
2703 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2704 | * | |
2705 | * 1) read the specified logical block(s); | |
2706 | * 2) transfer logical blocks from the data-out buffer; | |
2707 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2708 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2709 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2710 | * blocks transferred from the data-out buffer; and | |
2711 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2712 | */ | |
2713 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
6708bb27 AG |
2714 | if (!buf) { |
2715 | pr_err("Unable to allocate xor_callback buf\n"); | |
c66ac9db NB |
2716 | return; |
2717 | } | |
2718 | /* | |
ec98f782 | 2719 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg |
c66ac9db NB |
2720 | * into the locally allocated *buf |
2721 | */ | |
ec98f782 AG |
2722 | sg_copy_to_buffer(cmd->t_data_sg, |
2723 | cmd->t_data_nents, | |
2724 | buf, | |
2725 | cmd->data_length); | |
2726 | ||
c66ac9db NB |
2727 | /* |
2728 | * Now perform the XOR against the BIDI read memory located at | |
a1d8b49a | 2729 | * cmd->t_mem_bidi_list |
c66ac9db NB |
2730 | */ |
2731 | ||
2732 | offset = 0; | |
ec98f782 AG |
2733 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { |
2734 | addr = kmap_atomic(sg_page(sg), KM_USER0); | |
2735 | if (!addr) | |
c66ac9db NB |
2736 | goto out; |
2737 | ||
ec98f782 AG |
2738 | for (i = 0; i < sg->length; i++) |
2739 | *(addr + sg->offset + i) ^= *(buf + offset + i); | |
c66ac9db | 2740 | |
ec98f782 | 2741 | offset += sg->length; |
c66ac9db NB |
2742 | kunmap_atomic(addr, KM_USER0); |
2743 | } | |
ec98f782 | 2744 | |
c66ac9db NB |
2745 | out: |
2746 | kfree(buf); | |
2747 | } | |
2748 | ||
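/*
 * XDWRITEREAD XOR step, illustrated (editor's sketch): if a data-out
 * buffer byte is 0x5A and the corresponding byte read from media is
 * 0x3C, the loop above stores 0x5A ^ 0x3C = 0x66 back into the BIDI
 * read scatterlist that is later returned in the data-in buffer.
 */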
2749 | /* | |
2750 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2751 | */ | |
2752 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2753 | { | |
2754 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2755 | struct se_device *dev; | |
2756 | struct se_task *task = NULL, *task_tmp; | |
2757 | unsigned long flags; | |
2758 | u32 offset = 0; | |
2759 | ||
e3d6f909 AG |
2760 | WARN_ON(!cmd->se_lun); |
2761 | ||
a1d8b49a | 2762 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2763 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 2764 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2765 | return 0; |
2766 | } | |
2767 | ||
2768 | list_for_each_entry_safe(task, task_tmp, | |
a1d8b49a | 2769 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
2770 | |
2771 | if (!task->task_sense) | |
2772 | continue; | |
2773 | ||
2774 | dev = task->se_dev; | |
6708bb27 | 2775 | if (!dev) |
c66ac9db NB |
2776 | continue; |
2777 | ||
e3d6f909 | 2778 | if (!dev->transport->get_sense_buffer) { |
6708bb27 | 2779 | pr_err("dev->transport->get_sense_buffer" |
c66ac9db NB |
2780 | " is NULL\n"); |
2781 | continue; | |
2782 | } | |
2783 | ||
e3d6f909 | 2784 | sense_buffer = dev->transport->get_sense_buffer(task); |
6708bb27 AG |
2785 | if (!sense_buffer) { |
2786 | pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" | |
c66ac9db | 2787 | " sense buffer for task with sense\n", |
e3d6f909 | 2788 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
c66ac9db NB |
2789 | continue; |
2790 | } | |
a1d8b49a | 2791 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2792 | |
e3d6f909 | 2793 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
2794 | TRANSPORT_SENSE_BUFFER); |
2795 | ||
5951146d | 2796 | memcpy(&buffer[offset], sense_buffer, |
c66ac9db NB |
2797 | TRANSPORT_SENSE_BUFFER); |
2798 | cmd->scsi_status = task->task_scsi_status; | |
2799 | /* Automatically padded */ | |
2800 | cmd->scsi_sense_length = | |
2801 | (TRANSPORT_SENSE_BUFFER + offset); | |
2802 | ||
6708bb27 | 2803 | pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
c66ac9db | 2804 | " and sense\n", |
e3d6f909 | 2805 | dev->se_hba->hba_id, dev->transport->name, |
c66ac9db NB |
2806 | cmd->scsi_status); |
2807 | return 0; | |
2808 | } | |
a1d8b49a | 2809 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2810 | |
2811 | return -1; | |
2812 | } | |
2813 | ||
c66ac9db NB |
2814 | static int |
2815 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
2816 | { | |
2817 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2818 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2819 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
2820 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2821 | /* | |
2822 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2823 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2824 | * CONFLICT STATUS. | |
2825 | * | |
2826 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2827 | */ | |
e3d6f909 AG |
2828 | if (cmd->se_sess && |
2829 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2830 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2831 | cmd->orig_fe_lun, 0x2C, |
2832 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
5951146d | 2833 | return -EINVAL; |
c66ac9db NB |
2834 | } |
2835 | ||
ec98f782 AG |
2836 | static inline long long transport_dev_end_lba(struct se_device *dev) |
2837 | { | |
2838 | return dev->transport->get_blocks(dev) + 1; | |
2839 | } | |
2840 | ||
2841 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |
2842 | { | |
2843 | struct se_device *dev = cmd->se_dev; | |
2844 | u32 sectors; | |
2845 | ||
2846 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | |
2847 | return 0; | |
2848 | ||
2849 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | |
2850 | ||
6708bb27 AG |
2851 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { |
2852 | pr_err("LBA: %llu Sectors: %u exceeds" | |
ec98f782 AG |
2853 | " transport_dev_end_lba(): %llu\n", |
2854 | cmd->t_task_lba, sectors, | |
2855 | transport_dev_end_lba(dev)); | |
7abbe7f3 | 2856 | return -EINVAL; |
ec98f782 AG |
2857 | } |
2858 | ||
7abbe7f3 | 2859 | return 0; |
ec98f782 AG |
2860 | } |
2861 | ||
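/*
 * Worked example (editor's sketch): for a TYPE_DISK backend with a
 * 512-byte block_size, an 8192-byte data_length maps to 16 sectors, so
 * a command whose t_task_lba + 16 exceeds get_blocks(dev) + 1 is
 * rejected with -EINVAL by the check above.
 */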
706d5860 NB |
2862 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) |
2863 | { | |
2864 | /* | |
2865 | * Determine if the received WRITE_SAME is used for direct | 
2866 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | 
2867 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | 
2868 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code. | 
2869 | */ | |
2870 | int passthrough = (dev->transport->transport_type == | |
2871 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
2872 | ||
2873 | if (!passthrough) { | |
2874 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | |
2875 | pr_err("WRITE_SAME PBDATA and LBDATA" | |
2876 | " bits not supported for Block Discard" | |
2877 | " Emulation\n"); | |
2878 | return -ENOSYS; | |
2879 | } | |
2880 | /* | |
2881 | * Currently for the emulated case we only accept | |
2882 | * tpws with the UNMAP=1 bit set. | |
2883 | */ | |
2884 | if (!(flags[0] & 0x08)) { | |
2885 | pr_err("WRITE_SAME w/o UNMAP bit not" | |
2886 | " supported for Block Discard Emulation\n"); | |
2887 | return -ENOSYS; | |
2888 | } | |
2889 | } | |
2890 | ||
2891 | return 0; | |
2892 | } | |
2893 | ||
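/*
 * Flags byte checked above, summarized (editor's sketch; bit meanings
 * taken from the masks and messages in target_check_write_same_discard()):
 *
 *	0x08  UNMAP   - required for the emulated block-discard path
 *	0x04  PBDATA  - only allowed for TCM/pSCSI passthrough
 *	0x02  LBDATA  - only allowed for TCM/pSCSI passthrough
 */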
c66ac9db NB |
2894 | /* transport_generic_cmd_sequencer(): |
2895 | * | |
2896 | * Generic Command Sequencer that should work for most DAS transport | |
2897 | * drivers. | |
2898 | * | |
2899 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
2900 | * RX Thread. | |
2901 | * | |
2902 | * FIXME: Need to support other SCSI OPCODES where as well. | |
2903 | */ | |
2904 | static int transport_generic_cmd_sequencer( | |
2905 | struct se_cmd *cmd, | |
2906 | unsigned char *cdb) | |
2907 | { | |
5951146d | 2908 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2909 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
2910 | int ret = 0, sector_ret = 0, passthrough; | |
2911 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
2912 | u16 service_action; | |
2913 | u8 alua_ascq = 0; | |
2914 | /* | |
2915 | * Check for an existing UNIT ATTENTION condition | |
2916 | */ | |
2917 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
2918 | cmd->transport_wait_for_tasks = | |
2919 | &transport_nop_wait_for_tasks; | |
2920 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2921 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
5951146d | 2922 | return -EINVAL; |
c66ac9db NB |
2923 | } |
2924 | /* | |
2925 | * Check status of Asymmetric Logical Unit Assignment port | |
2926 | */ | |
e3d6f909 | 2927 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
c66ac9db NB |
2928 | if (ret != 0) { |
2929 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2930 | /* | |
25985edc | 2931 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
c66ac9db NB |
2932 | * The ALUA additional sense code qualifier (ASCQ) is determined |
2933 | * by the ALUA primary or secondary access state. |
2934 | */ | |
2935 | if (ret > 0) { | |
2936 | #if 0 | |
6708bb27 | 2937 | pr_debug("[%s]: ALUA TG Port not available," |
c66ac9db | 2938 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
e3d6f909 | 2939 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
c66ac9db NB |
2940 | #endif |
2941 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
2942 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2943 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
5951146d | 2944 | return -EINVAL; |
c66ac9db NB |
2945 | } |
2946 | goto out_invalid_cdb_field; | |
2947 | } | |
2948 | /* | |
2949 | * Check status for SPC-3 Persistent Reservations | |
2950 | */ | |
e3d6f909 AG |
2951 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
2952 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | |
c66ac9db NB |
2953 | cmd, cdb, pr_reg_type) != 0) |
2954 | return transport_handle_reservation_conflict(cmd); | |
2955 | /* | |
2956 | * This means the CDB is allowed for the SCSI Initiator port | |
2957 | * when said port is *NOT* holding the legacy SPC-2 or | |
2958 | * SPC-3 Persistent Reservation. | |
2959 | */ | |
2960 | } | |
2961 | ||
2962 | switch (cdb[0]) { | |
2963 | case READ_6: | |
2964 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2965 | if (sector_ret) | |
2966 | goto out_unsupported_cdb; | |
2967 | size = transport_get_size(sectors, cdb, cmd); | |
2968 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2969 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2970 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2971 | break; | |
2972 | case READ_10: | |
2973 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2974 | if (sector_ret) | |
2975 | goto out_unsupported_cdb; | |
2976 | size = transport_get_size(sectors, cdb, cmd); | |
2977 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 2978 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2979 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2980 | break; | |
2981 | case READ_12: | |
2982 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
2983 | if (sector_ret) | |
2984 | goto out_unsupported_cdb; | |
2985 | size = transport_get_size(sectors, cdb, cmd); | |
2986 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a | 2987 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2988 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2989 | break; | |
2990 | case READ_16: | |
2991 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
2992 | if (sector_ret) | |
2993 | goto out_unsupported_cdb; | |
2994 | size = transport_get_size(sectors, cdb, cmd); | |
2995 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a | 2996 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
2997 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2998 | break; | |
2999 | case WRITE_6: | |
3000 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
3001 | if (sector_ret) | |
3002 | goto out_unsupported_cdb; | |
3003 | size = transport_get_size(sectors, cdb, cmd); | |
3004 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 3005 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
3006 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3007 | break; | |
3008 | case WRITE_10: | |
3009 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3010 | if (sector_ret) | |
3011 | goto out_unsupported_cdb; | |
3012 | size = transport_get_size(sectors, cdb, cmd); | |
3013 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a AG |
3014 | cmd->t_task_lba = transport_lba_32(cdb); |
3015 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3016 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3017 | break; | |
3018 | case WRITE_12: | |
3019 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
3020 | if (sector_ret) | |
3021 | goto out_unsupported_cdb; | |
3022 | size = transport_get_size(sectors, cdb, cmd); | |
3023 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a AG |
3024 | cmd->t_task_lba = transport_lba_32(cdb); |
3025 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3026 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3027 | break; | |
3028 | case WRITE_16: | |
3029 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3030 | if (sector_ret) | |
3031 | goto out_unsupported_cdb; | |
3032 | size = transport_get_size(sectors, cdb, cmd); | |
3033 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a AG |
3034 | cmd->t_task_lba = transport_lba_64(cdb); |
3035 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3036 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3037 | break; | |
3038 | case XDWRITEREAD_10: | |
3039 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
a1d8b49a | 3040 | !(cmd->t_tasks_bidi)) |
c66ac9db NB |
3041 | goto out_invalid_cdb_field; |
3042 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3043 | if (sector_ret) | |
3044 | goto out_unsupported_cdb; | |
3045 | size = transport_get_size(sectors, cdb, cmd); | |
3046 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 3047 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db | 3048 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
e3d6f909 | 3049 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3050 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3051 | /* | |
3052 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3053 | */ | |
3054 | if (passthrough) | |
3055 | break; | |
3056 | /* | |
3057 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
3058 | */ | |
3059 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3060 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
c66ac9db NB |
3061 | break; |
3062 | case VARIABLE_LENGTH_CMD: | |
3063 | service_action = get_unaligned_be16(&cdb[8]); | |
3064 | /* | |
3065 | * Determine if this is TCM/PSCSI device and we should disable | |
3066 | * internal emulation for this CDB. | |
3067 | */ | |
e3d6f909 | 3068 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3069 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3070 | ||
3071 | switch (service_action) { | |
3072 | case XDWRITEREAD_32: | |
3073 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3074 | if (sector_ret) | |
3075 | goto out_unsupported_cdb; | |
3076 | size = transport_get_size(sectors, cdb, cmd); | |
3077 | /* | |
3078 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3079 | * XDWRITE_READ_32 logic. | |
3080 | */ | |
3081 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
a1d8b49a | 3082 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
c66ac9db NB |
3083 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3084 | ||
3085 | /* | |
3086 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3087 | */ | |
3088 | if (passthrough) | |
3089 | break; | |
3090 | ||
3091 | /* | |
3092 | * Setup BIDI XOR callback to be run during | |
3093 | * transport_generic_complete_ok() | |
3094 | */ | |
3095 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3096 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
c66ac9db NB |
3097 | break; |
3098 | case WRITE_SAME_32: | |
3099 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3100 | if (sector_ret) | |
3101 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3102 | |
6708bb27 | 3103 | if (sectors) |
12850626 | 3104 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3105 | else { |
3106 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | |
3107 | " supported\n"); | |
3108 | goto out_invalid_cdb_field; | |
3109 | } | |
dd3a5ad8 | 3110 | |
a1d8b49a | 3111 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
c66ac9db NB |
3112 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3113 | ||
706d5860 | 3114 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
c66ac9db | 3115 | goto out_invalid_cdb_field; |
706d5860 | 3116 | |
c66ac9db NB |
3117 | break; |
3118 | default: | |
6708bb27 | 3119 | pr_err("VARIABLE_LENGTH_CMD service action" |
c66ac9db NB |
3120 | " 0x%04x not supported\n", service_action); |
3121 | goto out_unsupported_cdb; | |
3122 | } | |
3123 | break; | |
e434f1f1 | 3124 | case MAINTENANCE_IN: |
e3d6f909 | 3125 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3126 | /* MAINTENANCE_IN from SCC-2 */ |
3127 | /* | |
3128 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3129 | */ | |
3130 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3131 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3132 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3133 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3134 | core_emulate_report_target_port_groups : |
c66ac9db NB |
3135 | NULL; |
3136 | } | |
3137 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3138 | (cdb[8] << 8) | cdb[9]; | |
3139 | } else { | |
3140 | /* GPCMD_SEND_KEY from multi media commands */ | |
3141 | size = (cdb[8] << 8) + cdb[9]; | |
3142 | } | |
05d1c7c0 | 3143 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3144 | break; |
3145 | case MODE_SELECT: | |
3146 | size = cdb[4]; | |
3147 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3148 | break; | |
3149 | case MODE_SELECT_10: | |
3150 | size = (cdb[7] << 8) + cdb[8]; | |
3151 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3152 | break; | |
3153 | case MODE_SENSE: | |
3154 | size = cdb[4]; | |
05d1c7c0 | 3155 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3156 | break; |
3157 | case MODE_SENSE_10: | |
3158 | case GPCMD_READ_BUFFER_CAPACITY: | |
3159 | case GPCMD_SEND_OPC: | |
3160 | case LOG_SELECT: | |
3161 | case LOG_SENSE: | |
3162 | size = (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3163 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3164 | break; |
3165 | case READ_BLOCK_LIMITS: | |
3166 | size = READ_BLOCK_LEN; | |
05d1c7c0 | 3167 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3168 | break; |
3169 | case GPCMD_GET_CONFIGURATION: | |
3170 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3171 | case GPCMD_READ_DISC_INFO: | |
3172 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3173 | size = (cdb[7] << 8) + cdb[8]; | |
3174 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3175 | break; | |
3176 | case PERSISTENT_RESERVE_IN: | |
3177 | case PERSISTENT_RESERVE_OUT: | |
3178 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3179 | (su_dev->t10_pr.res_type == |
c66ac9db | 3180 | SPC3_PERSISTENT_RESERVATIONS) ? |
e3d6f909 | 3181 | core_scsi3_emulate_pr : NULL; |
c66ac9db | 3182 | size = (cdb[7] << 8) + cdb[8]; |
05d1c7c0 | 3183 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3184 | break; |
3185 | case GPCMD_MECHANISM_STATUS: | |
3186 | case GPCMD_READ_DVD_STRUCTURE: | |
3187 | size = (cdb[8] << 8) + cdb[9]; | |
3188 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3189 | break; | |
3190 | case READ_POSITION: | |
3191 | size = READ_POSITION_LEN; | |
05d1c7c0 | 3192 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db | 3193 | break; |
e434f1f1 | 3194 | case MAINTENANCE_OUT: |
e3d6f909 | 3195 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3196 | /* MAINTENANCE_OUT from SCC-2 |
3197 | * | |
3198 | * Check for emulated MO_SET_TARGET_PGS. | |
3199 | */ | |
3200 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3201 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3202 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3203 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3204 | core_emulate_set_target_port_groups : |
c66ac9db NB |
3205 | NULL; |
3206 | } | |
3207 | ||
3208 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3209 | (cdb[8] << 8) | cdb[9]; | |
3210 | } else { | |
3211 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3212 | size = (cdb[8] << 8) + cdb[9]; | |
3213 | } | |
05d1c7c0 | 3214 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3215 | break; |
3216 | case INQUIRY: | |
3217 | size = (cdb[3] << 8) + cdb[4]; | |
3218 | /* | |
3219 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
3220 | * See spc4r17 section 5.3 | |
3221 | */ | |
5951146d | 3222 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3223 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3224 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3225 | break; |
3226 | case READ_BUFFER: | |
3227 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3228 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3229 | break; |
3230 | case READ_CAPACITY: | |
3231 | size = READ_CAP_LEN; | |
05d1c7c0 | 3232 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3233 | break; |
3234 | case READ_MEDIA_SERIAL_NUMBER: | |
3235 | case SECURITY_PROTOCOL_IN: | |
3236 | case SECURITY_PROTOCOL_OUT: | |
3237 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
05d1c7c0 | 3238 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3239 | break; |
3240 | case SERVICE_ACTION_IN: | |
3241 | case ACCESS_CONTROL_IN: | |
3242 | case ACCESS_CONTROL_OUT: | |
3243 | case EXTENDED_COPY: | |
3244 | case READ_ATTRIBUTE: | |
3245 | case RECEIVE_COPY_RESULTS: | |
3246 | case WRITE_ATTRIBUTE: | |
3247 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3248 | (cdb[12] << 8) | cdb[13]; | |
05d1c7c0 | 3249 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3250 | break; |
3251 | case RECEIVE_DIAGNOSTIC: | |
3252 | case SEND_DIAGNOSTIC: | |
3253 | size = (cdb[3] << 8) | cdb[4]; | |
05d1c7c0 | 3254 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3255 | break; |
3256 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3257 | #if 0 | |
3258 | case GPCMD_READ_CD: | |
3259 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3260 | size = (2336 * sectors); | |
05d1c7c0 | 3261 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3262 | break; |
3263 | #endif | |
3264 | case READ_TOC: | |
3265 | size = cdb[8]; | |
05d1c7c0 | 3266 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3267 | break; |
3268 | case REQUEST_SENSE: | |
3269 | size = cdb[4]; | |
05d1c7c0 | 3270 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3271 | break; |
3272 | case READ_ELEMENT_STATUS: | |
3273 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
05d1c7c0 | 3274 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3275 | break; |
3276 | case WRITE_BUFFER: | |
3277 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3278 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3279 | break; |
3280 | case RESERVE: | |
3281 | case RESERVE_10: | |
3282 | /* | |
3283 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3284 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3285 | */ | |
3286 | if (cdb[0] == RESERVE_10) | |
3287 | size = (cdb[7] << 8) | cdb[8]; | |
3288 | else | |
3289 | size = cmd->data_length; | |
3290 | ||
3291 | /* | |
3292 | * Setup the legacy emulated handler for SPC-2 and | |
3293 | * >= SPC-3 compatible reservation handling (CRH=1) | |
3294 | * Otherwise, we assume the underlying SCSI logic is | |
3295 | * running in SPC_PASSTHROUGH, and wants reservations |
3296 | * emulation disabled. | |
3297 | */ | |
3298 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3299 | (su_dev->t10_pr.res_type != |
c66ac9db | 3300 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3301 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3302 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3303 | break; | |
3304 | case RELEASE: | |
3305 | case RELEASE_10: | |
3306 | /* | |
3307 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3308 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3309 | */ | |
3310 | if (cdb[0] == RELEASE_10) | |
3311 | size = (cdb[7] << 8) | cdb[8]; | |
3312 | else | |
3313 | size = cmd->data_length; | |
3314 | ||
3315 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3316 | (su_dev->t10_pr.res_type != |
c66ac9db | 3317 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3318 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3319 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3320 | break; | |
3321 | case SYNCHRONIZE_CACHE: | |
3322 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3323 | /* | |
3324 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3325 | */ | |
3326 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3327 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
a1d8b49a | 3328 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3329 | } else { |
3330 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
a1d8b49a | 3331 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
3332 | } |
3333 | if (sector_ret) | |
3334 | goto out_unsupported_cdb; | |
3335 | ||
3336 | size = transport_get_size(sectors, cdb, cmd); | |
3337 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3338 | ||
3339 | /* | |
3340 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3341 | */ | |
e3d6f909 | 3342 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
c66ac9db NB |
3343 | break; |
3344 | /* | |
3345 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3346 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3347 | */ | |
3348 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3349 | /* | |
3350 | * Check to ensure that LBA + Range does not exceed past end of | |
7abbe7f3 | 3351 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
c66ac9db | 3352 | */ |
7abbe7f3 NB |
3353 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3354 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3355 | goto out_invalid_cdb_field; | |
3356 | } | |
c66ac9db NB |
3357 | break; |
3358 | case UNMAP: | |
3359 | size = get_unaligned_be16(&cdb[7]); | |
05d1c7c0 | 3360 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3361 | break; |
3362 | case WRITE_SAME_16: | |
3363 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3364 | if (sector_ret) | |
3365 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3366 | |
6708bb27 | 3367 | if (sectors) |
12850626 | 3368 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3369 | else { |
3370 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3371 | goto out_invalid_cdb_field; | |
3372 | } | |
dd3a5ad8 | 3373 | |
5db0753b | 3374 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
706d5860 NB |
3375 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3376 | ||
3377 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3378 | goto out_invalid_cdb_field; | |
3379 | break; | |
3380 | case WRITE_SAME: | |
3381 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3382 | if (sector_ret) | |
3383 | goto out_unsupported_cdb; | |
3384 | ||
3385 | if (sectors) | |
12850626 | 3386 | size = transport_get_size(1, cdb, cmd); |
706d5860 NB |
3387 | else { |
3388 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3389 | goto out_invalid_cdb_field; | |
c66ac9db | 3390 | } |
706d5860 NB |
3391 | |
3392 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | |
c66ac9db | 3393 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
706d5860 NB |
3394 | /* |
3395 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | |
3396 | * of byte 1 bit 3 UNMAP instead of original reserved field | |
3397 | */ | |
3398 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3399 | goto out_invalid_cdb_field; | |
c66ac9db NB |
3400 | break; |
3401 | case ALLOW_MEDIUM_REMOVAL: | |
3402 | case GPCMD_CLOSE_TRACK: | |
3403 | case ERASE: | |
3404 | case INITIALIZE_ELEMENT_STATUS: | |
3405 | case GPCMD_LOAD_UNLOAD: | |
3406 | case REZERO_UNIT: | |
3407 | case SEEK_10: | |
3408 | case GPCMD_SET_SPEED: | |
3409 | case SPACE: | |
3410 | case START_STOP: | |
3411 | case TEST_UNIT_READY: | |
3412 | case VERIFY: | |
3413 | case WRITE_FILEMARKS: | |
3414 | case MOVE_MEDIUM: | |
3415 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3416 | break; | |
3417 | case REPORT_LUNS: | |
3418 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3419 | transport_core_report_lun_response; |
c66ac9db NB |
3420 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3421 | /* | |
3422 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS |
3423 | * See spc4r17 section 5.3 | |
3424 | */ | |
5951146d | 3425 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3426 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3427 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3428 | break; |
3429 | default: | |
6708bb27 | 3430 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
c66ac9db | 3431 | " 0x%02x, sending CHECK_CONDITION.\n", |
e3d6f909 | 3432 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
c66ac9db NB |
3433 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3434 | goto out_unsupported_cdb; | |
3435 | } | |
3436 | ||
3437 | if (size != cmd->data_length) { | |
6708bb27 | 3438 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
c66ac9db | 3439 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
e3d6f909 | 3440 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
c66ac9db NB |
3441 | cmd->data_length, size, cdb[0]); |
3442 | ||
3443 | cmd->cmd_spdtl = size; | |
3444 | ||
3445 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
6708bb27 | 3446 | pr_err("Rejecting underflow/overflow" |
c66ac9db NB |
3447 | " WRITE data\n"); |
3448 | goto out_invalid_cdb_field; | |
3449 | } | |
3450 | /* | |
3451 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3452 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3453 | */ | |
6708bb27 AG |
3454 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3455 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | |
c66ac9db | 3456 | " CDB on non 512-byte sector setup subsystem" |
e3d6f909 | 3457 | " plugin: %s\n", dev->transport->name); |
c66ac9db NB |
3458 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3459 | goto out_invalid_cdb_field; | |
3460 | } | |
3461 | ||
3462 | if (size > cmd->data_length) { | |
3463 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3464 | cmd->residual_count = (size - cmd->data_length); | |
3465 | } else { | |
3466 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3467 | cmd->residual_count = (cmd->data_length - size); | |
3468 | } | |
3469 | cmd->data_length = size; | |
3470 | } | |
3471 | ||
d0229ae3 AG |
3472 | /* Let's limit control cdbs to a page, for simplicity's sake. */ |
3473 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | |
3474 | size > PAGE_SIZE) | |
3475 | goto out_invalid_cdb_field; | |
3476 | ||
c66ac9db NB |
3477 | transport_set_supported_SAM_opcode(cmd); |
3478 | return ret; | |
3479 | ||
3480 | out_unsupported_cdb: | |
3481 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3482 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
5951146d | 3483 | return -EINVAL; |
c66ac9db NB |
3484 | out_invalid_cdb_field: |
3485 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3486 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 3487 | return -EINVAL; |
c66ac9db NB |
3488 | } |
3489 | ||
c66ac9db NB |
3490 | /* |
3491 | * Called from transport_generic_complete_ok() and | |
3492 | * transport_generic_request_failure() to determine which dormant/delayed | |
3493 | * and ordered cmds need to have their tasks added to the execution queue. | |
3494 | */ | |
3495 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3496 | { | |
5951146d | 3497 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
3498 | struct se_cmd *cmd_p, *cmd_tmp; |
3499 | int new_active_tasks = 0; | |
3500 | ||
e66ecd50 | 3501 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
c66ac9db NB |
3502 | atomic_dec(&dev->simple_cmds); |
3503 | smp_mb__after_atomic_dec(); | |
3504 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3505 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
c66ac9db NB |
3506 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3507 | cmd->se_ordered_id); | |
e66ecd50 | 3508 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
3509 | atomic_dec(&dev->dev_hoq_count); |
3510 | smp_mb__after_atomic_dec(); | |
3511 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3512 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
c66ac9db NB |
3513 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3514 | cmd->se_ordered_id); | |
e66ecd50 | 3515 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
c66ac9db | 3516 | spin_lock(&dev->ordered_cmd_lock); |
5951146d | 3517 | list_del(&cmd->se_ordered_node); |
c66ac9db NB |
3518 | atomic_dec(&dev->dev_ordered_sync); |
3519 | smp_mb__after_atomic_dec(); | |
3520 | spin_unlock(&dev->ordered_cmd_lock); | |
3521 | ||
3522 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3523 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
c66ac9db NB |
3524 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3525 | } | |
3526 | /* | |
3527 | * Process all commands up to the last received | |
3528 | * ORDERED task attribute which requires another blocking | |
3529 | * boundary | |
3530 | */ | |
3531 | spin_lock(&dev->delayed_cmd_lock); | |
3532 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
5951146d | 3533 | &dev->delayed_cmd_list, se_delayed_node) { |
c66ac9db | 3534 | |
5951146d | 3535 | list_del(&cmd_p->se_delayed_node); |
c66ac9db NB |
3536 | spin_unlock(&dev->delayed_cmd_lock); |
3537 | ||
6708bb27 | 3538 | pr_debug("Calling add_tasks() for" |
c66ac9db NB |
3539 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3540 | " Dormant -> Active, se_ordered_id: %u\n", | |
6708bb27 | 3541 | cmd_p->t_task_cdb[0], |
c66ac9db NB |
3542 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3543 | ||
3544 | transport_add_tasks_from_cmd(cmd_p); | |
3545 | new_active_tasks++; | |
3546 | ||
3547 | spin_lock(&dev->delayed_cmd_lock); | |
e66ecd50 | 3548 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) |
c66ac9db NB |
3549 | break; |
3550 | } | |
3551 | spin_unlock(&dev->delayed_cmd_lock); | |
3552 | /* | |
3553 | * If new tasks have become active, wake up the transport thread | |
3554 | * to do the processing of the Active tasks. | |
3555 | */ | |
3556 | if (new_active_tasks != 0) | |
e3d6f909 | 3557 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
3558 | } |
3559 | ||
07bde79a NB |
3560 | static int transport_complete_qf(struct se_cmd *cmd) |
3561 | { | |
3562 | int ret = 0; | |
3563 | ||
3564 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | |
3565 | return cmd->se_tfo->queue_status(cmd); | |
3566 | ||
3567 | switch (cmd->data_direction) { | |
3568 | case DMA_FROM_DEVICE: | |
3569 | ret = cmd->se_tfo->queue_data_in(cmd); | |
3570 | break; | |
3571 | case DMA_TO_DEVICE: | |
ec98f782 | 3572 | if (cmd->t_bidi_data_sg) { |
07bde79a NB |
3573 | ret = cmd->se_tfo->queue_data_in(cmd); |
3574 | if (ret < 0) | |
3575 | return ret; | |
3576 | } | |
3577 | /* Fall through for DMA_TO_DEVICE */ | |
3578 | case DMA_NONE: | |
3579 | ret = cmd->se_tfo->queue_status(cmd); | |
3580 | break; | |
3581 | default: | |
3582 | break; | |
3583 | } | |
3584 | ||
3585 | return ret; | |
3586 | } | |
3587 | ||
3588 | static void transport_handle_queue_full( | |
3589 | struct se_cmd *cmd, | |
3590 | struct se_device *dev, | |
3591 | int (*qf_callback)(struct se_cmd *)) | |
3592 | { | |
3593 | spin_lock_irq(&dev->qf_cmd_lock); | |
3594 | cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; | |
3595 | cmd->transport_qf_callback = qf_callback; | |
3596 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | |
3597 | atomic_inc(&dev->dev_qf_count); | |
3598 | smp_mb__after_atomic_inc(); | |
3599 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | |
3600 | ||
3601 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3602 | } | |
3603 | ||
c66ac9db NB |
3604 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
3605 | { | |
07bde79a | 3606 | int reason = 0, ret; |
c66ac9db NB |
3607 | /* |
3608 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3609 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3610 | * Attribute. | |
3611 | */ | |
5951146d | 3612 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
c66ac9db | 3613 | transport_complete_task_attr(cmd); |
07bde79a NB |
3614 | /* |
3615 | * Check to schedule QUEUE_FULL work, or execute an existing | |
3616 | * cmd->transport_qf_callback() | |
3617 | */ | |
3618 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | |
3619 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3620 | ||
3621 | if (cmd->transport_qf_callback) { | |
3622 | ret = cmd->transport_qf_callback(cmd); | |
3623 | if (ret < 0) | |
3624 | goto queue_full; | |
3625 | ||
3626 | cmd->transport_qf_callback = NULL; | |
3627 | goto done; | |
3628 | } | |
c66ac9db NB |
3629 | /* |
3630 | * Check if we need to retrieve a sense buffer from | |
3631 | * the struct se_cmd in question. | |
3632 | */ | |
3633 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3634 | if (transport_get_sense_data(cmd) < 0) | |
3635 | reason = TCM_NON_EXISTENT_LUN; | |
3636 | ||
3637 | /* | |
3638 | * Only set when an struct se_task->task_scsi_status returned | |
3639 | * a non GOOD status. | |
3640 | */ | |
3641 | if (cmd->scsi_status) { | |
07bde79a | 3642 | ret = transport_send_check_condition_and_sense( |
c66ac9db | 3643 | cmd, reason, 1); |
07bde79a NB |
3644 | if (ret == -EAGAIN) |
3645 | goto queue_full; | |
3646 | ||
c66ac9db NB |
3647 | transport_lun_remove_cmd(cmd); |
3648 | transport_cmd_check_stop_to_fabric(cmd); | |
3649 | return; | |
3650 | } | |
3651 | } | |
3652 | /* | |
25985edc | 3653 | * Check for a callback, used by, amongst other things, |
c66ac9db NB |
3654 | * XDWRITE_READ_10 emulation. |
3655 | */ | |
3656 | if (cmd->transport_complete_callback) | |
3657 | cmd->transport_complete_callback(cmd); | |
3658 | ||
3659 | switch (cmd->data_direction) { | |
3660 | case DMA_FROM_DEVICE: | |
3661 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3662 | if (cmd->se_lun->lun_sep) { |
3663 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3664 | cmd->data_length; |
3665 | } | |
3666 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
c66ac9db | 3667 | |
07bde79a NB |
3668 | ret = cmd->se_tfo->queue_data_in(cmd); |
3669 | if (ret == -EAGAIN) | |
3670 | goto queue_full; | |
c66ac9db NB |
3671 | break; |
3672 | case DMA_TO_DEVICE: | |
3673 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3674 | if (cmd->se_lun->lun_sep) { |
3675 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | |
c66ac9db NB |
3676 | cmd->data_length; |
3677 | } | |
3678 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3679 | /* | |
3680 | * Check if we need to send READ payload for BIDI-COMMAND | |
3681 | */ | |
ec98f782 | 3682 | if (cmd->t_bidi_data_sg) { |
c66ac9db | 3683 | spin_lock(&cmd->se_lun->lun_sep_lock); |
e3d6f909 AG |
3684 | if (cmd->se_lun->lun_sep) { |
3685 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3686 | cmd->data_length; |
3687 | } | |
3688 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
07bde79a NB |
3689 | ret = cmd->se_tfo->queue_data_in(cmd); |
3690 | if (ret == -EAGAIN) | |
3691 | goto queue_full; | |
c66ac9db NB |
3692 | break; |
3693 | } | |
3694 | /* Fall through for DMA_TO_DEVICE */ | |
3695 | case DMA_NONE: | |
07bde79a NB |
3696 | ret = cmd->se_tfo->queue_status(cmd); |
3697 | if (ret == -EAGAIN) | |
3698 | goto queue_full; | |
c66ac9db NB |
3699 | break; |
3700 | default: | |
3701 | break; | |
3702 | } | |
3703 | ||
07bde79a | 3704 | done: |
c66ac9db NB |
3705 | transport_lun_remove_cmd(cmd); |
3706 | transport_cmd_check_stop_to_fabric(cmd); | |
07bde79a NB |
3707 | return; |
3708 | ||
3709 | queue_full: | |
6708bb27 | 3710 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
07bde79a NB |
3711 | " data_direction: %d\n", cmd, cmd->data_direction); |
3712 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
3713 | } |
3714 | ||
3715 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3716 | { | |
3717 | struct se_task *task, *task_tmp; | |
3718 | unsigned long flags; | |
3719 | ||
a1d8b49a | 3720 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3721 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 3722 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
3723 | if (atomic_read(&task->task_active)) |
3724 | continue; | |
3725 | ||
3726 | kfree(task->task_sg_bidi); | |
3727 | kfree(task->task_sg); | |
3728 | ||
3729 | list_del(&task->t_list); | |
3730 | ||
a1d8b49a | 3731 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3732 | if (task->se_dev) |
e3d6f909 | 3733 | task->se_dev->transport->free_task(task); |
c66ac9db | 3734 | else |
6708bb27 | 3735 | pr_err("task[%u] - task->se_dev is NULL\n", |
c66ac9db | 3736 | task->task_no); |
a1d8b49a | 3737 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3738 | } |
a1d8b49a | 3739 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3740 | } |
3741 | ||
6708bb27 | 3742 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
c66ac9db | 3743 | { |
ec98f782 | 3744 | struct scatterlist *sg; |
ec98f782 | 3745 | int count; |
c66ac9db | 3746 | |
6708bb27 AG |
3747 | for_each_sg(sgl, sg, nents, count) |
3748 | __free_page(sg_page(sg)); | |
c66ac9db | 3749 | |
6708bb27 AG |
3750 | kfree(sgl); |
3751 | } | |
c66ac9db | 3752 | |
6708bb27 AG |
3753 | static inline void transport_free_pages(struct se_cmd *cmd) |
3754 | { | |
3755 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3756 | return; | |
3757 | ||
3758 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); | |
ec98f782 AG |
3759 | cmd->t_data_sg = NULL; |
3760 | cmd->t_data_nents = 0; | |
c66ac9db | 3761 | |
6708bb27 | 3762 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
ec98f782 AG |
3763 | cmd->t_bidi_data_sg = NULL; |
3764 | cmd->t_bidi_data_nents = 0; | |
c66ac9db NB |
3765 | } |
3766 | ||
3767 | static inline void transport_release_tasks(struct se_cmd *cmd) | |
3768 | { | |
3769 | transport_free_dev_tasks(cmd); | |
3770 | } | |
3771 | ||
3772 | static inline int transport_dec_and_check(struct se_cmd *cmd) | |
3773 | { | |
3774 | unsigned long flags; | |
3775 | ||
a1d8b49a AG |
3776 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3777 | if (atomic_read(&cmd->t_fe_count)) { | |
6708bb27 | 3778 | if (!atomic_dec_and_test(&cmd->t_fe_count)) { |
a1d8b49a | 3779 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3780 | flags); |
3781 | return 1; | |
3782 | } | |
3783 | } | |
3784 | ||
a1d8b49a | 3785 | if (atomic_read(&cmd->t_se_count)) { |
6708bb27 | 3786 | if (!atomic_dec_and_test(&cmd->t_se_count)) { |
a1d8b49a | 3787 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3788 | flags); |
3789 | return 1; | |
3790 | } | |
3791 | } | |
a1d8b49a | 3792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3793 | |
3794 | return 0; | |
3795 | } | |
3796 | ||
3797 | static void transport_release_fe_cmd(struct se_cmd *cmd) | |
3798 | { | |
3799 | unsigned long flags; | |
3800 | ||
3801 | if (transport_dec_and_check(cmd)) | |
3802 | return; | |
3803 | ||
a1d8b49a | 3804 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 3805 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 3806 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3807 | goto free_pages; |
3808 | } | |
a1d8b49a | 3809 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3810 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3811 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3812 | |
3813 | transport_release_tasks(cmd); | |
3814 | free_pages: | |
3815 | transport_free_pages(cmd); | |
3816 | transport_free_se_cmd(cmd); | |
35462975 | 3817 | cmd->se_tfo->release_cmd(cmd); |
c66ac9db NB |
3818 | } |
3819 | ||
35462975 CH |
3820 | static int |
3821 | transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) | |
c66ac9db NB |
3822 | { |
3823 | unsigned long flags; | |
3824 | ||
c66ac9db NB |
3825 | if (transport_dec_and_check(cmd)) { |
3826 | if (session_reinstatement) { | |
a1d8b49a | 3827 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3828 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3829 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3830 | flags); |
3831 | } | |
3832 | return 1; | |
3833 | } | |
3834 | ||
a1d8b49a | 3835 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 3836 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 3837 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3838 | goto free_pages; |
3839 | } | |
a1d8b49a | 3840 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3841 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3842 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3843 | |
3844 | transport_release_tasks(cmd); | |
5951146d | 3845 | |
c66ac9db NB |
3846 | free_pages: |
3847 | transport_free_pages(cmd); | |
35462975 | 3848 | transport_release_cmd(cmd); |
c66ac9db NB |
3849 | return 0; |
3850 | } | |
3851 | ||
3852 | /* | |
ec98f782 AG |
3853 | * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of |
3854 | * allocating in the core. | |
c66ac9db NB |
3855 | * @cmd: Associated se_cmd descriptor |
3856 | * @mem: SGL style memory for TCM WRITE / READ | |
3857 | * @sg_mem_num: Number of SGL elements | |
3858 | * @mem_bidi_in: SGL style memory for TCM BIDI READ | |
3859 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements | |
3860 | * | |
3861 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage |
3862 | * of parameters. | |
3863 | */ | |
3864 | int transport_generic_map_mem_to_cmd( | |
3865 | struct se_cmd *cmd, | |
5951146d AG |
3866 | struct scatterlist *sgl, |
3867 | u32 sgl_count, | |
3868 | struct scatterlist *sgl_bidi, | |
3869 | u32 sgl_bidi_count) | |
c66ac9db | 3870 | { |
5951146d | 3871 | if (!sgl || !sgl_count) |
c66ac9db | 3872 | return 0; |
c66ac9db | 3873 | |
c66ac9db NB |
3874 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
3875 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
c66ac9db | 3876 | |
ec98f782 AG |
3877 | cmd->t_data_sg = sgl; |
3878 | cmd->t_data_nents = sgl_count; | |
c66ac9db | 3879 | |
ec98f782 AG |
3880 | if (sgl_bidi && sgl_bidi_count) { |
3881 | cmd->t_bidi_data_sg = sgl_bidi; | |
3882 | cmd->t_bidi_data_nents = sgl_bidi_count; | |
c66ac9db NB |
3883 | } |
3884 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
c66ac9db NB |
3885 | } |
3886 | ||
3887 | return 0; | |
3888 | } | |
3889 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
3890 | ||
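/*
 * Editor's note (illustrative sketch, not part of the original source; the
 * my_fabric_build_sgl() helper and fabric_req below are hypothetical names):
 * a fabric module that already owns SGL memory for the payload would
 * typically hand it to the core before submitting the command, instead of
 * letting transport_generic_new_cmd() allocate pages itself:
 *
 *	struct scatterlist *sgl;
 *	u32 sgl_count;
 *
 *	sgl = my_fabric_build_sgl(fabric_req, &sgl_count);
 *	if (transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					     NULL, 0) < 0)
 *		return -ENOMEM;
 *
 * On success SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is set (for SG_IO style
 * CDBs), so transport_generic_get_mem() and transport_free_pages() leave
 * the fabric-owned memory alone.
 */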
c66ac9db NB |
3891 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3892 | { | |
5951146d | 3893 | struct se_device *dev = cmd->se_dev; |
01cde4d5 | 3894 | int set_counts = 1, rc, task_cdbs; |
c66ac9db | 3895 | |
ec98f782 AG |
3896 | /* |
3897 | * Setup any BIDI READ tasks and memory from | |
3898 | * cmd->t_bidi_data_sg so the READ struct se_tasks |
3899 | * are queued first for the non pSCSI passthrough case. | |
3900 | */ | |
3901 | if (cmd->t_bidi_data_sg && | |
3902 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
3903 | rc = transport_allocate_tasks(cmd, | |
3904 | cmd->t_task_lba, | |
3905 | DMA_FROM_DEVICE, | |
3906 | cmd->t_bidi_data_sg, | |
3907 | cmd->t_bidi_data_nents); | |
6708bb27 | 3908 | if (rc <= 0) { |
c66ac9db NB |
3909 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3910 | cmd->scsi_sense_reason = | |
ec98f782 | 3911 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
01cde4d5 | 3912 | return -EINVAL; |
c66ac9db | 3913 | } |
ec98f782 AG |
3914 | atomic_inc(&cmd->t_fe_count); |
3915 | atomic_inc(&cmd->t_se_count); | |
3916 | set_counts = 0; | |
3917 | } | |
3918 | /* | |
3919 | * Setup the tasks and memory from cmd->t_data_sg |
3920 | * Note for BIDI transfers this will contain the WRITE payload | |
3921 | */ | |
3922 | task_cdbs = transport_allocate_tasks(cmd, | |
3923 | cmd->t_task_lba, | |
3924 | cmd->data_direction, | |
3925 | cmd->t_data_sg, | |
3926 | cmd->t_data_nents); | |
6708bb27 | 3927 | if (task_cdbs <= 0) { |
ec98f782 AG |
3928 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3929 | cmd->scsi_sense_reason = | |
3930 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
01cde4d5 | 3931 | return -EINVAL; |
ec98f782 | 3932 | } |
c66ac9db | 3933 | |
ec98f782 AG |
3934 | if (set_counts) { |
3935 | atomic_inc(&cmd->t_fe_count); | |
3936 | atomic_inc(&cmd->t_se_count); | |
c66ac9db NB |
3937 | } |
3938 | ||
ec98f782 AG |
3939 | cmd->t_task_list_num = task_cdbs; |
3940 | ||
a1d8b49a AG |
3941 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); |
3942 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | |
3943 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | |
c66ac9db NB |
3944 | return 0; |
3945 | } | |
3946 | ||
05d1c7c0 AG |
3947 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
3948 | { | |
ec98f782 | 3949 | struct scatterlist *sg = cmd->t_data_sg; |
05d1c7c0 | 3950 | |
ec98f782 | 3951 | BUG_ON(!sg); |
05d1c7c0 | 3952 | /* |
ec98f782 AG |
3953 | * We need to take into account a possible offset here for fabrics like |
3954 | * tcm_loop who may be using a contig buffer from the SCSI midlayer for | |
3955 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | |
05d1c7c0 | 3956 | */ |
ec98f782 | 3957 | return kmap(sg_page(sg)) + sg->offset; |
05d1c7c0 AG |
3958 | } |
3959 | EXPORT_SYMBOL(transport_kmap_first_data_page); | |
3960 | ||
3961 | void transport_kunmap_first_data_page(struct se_cmd *cmd) | |
3962 | { | |
ec98f782 | 3963 | kunmap(sg_page(cmd->t_data_sg)); |
05d1c7c0 AG |
3964 | } |
3965 | EXPORT_SYMBOL(transport_kunmap_first_data_page); | |
3966 | ||
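/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the two helpers above are meant to be used as a strict pair around a
 * short access to the first page of cmd->t_data_sg, e.g. when an emulation
 * routine fills in a single-page control CDB payload:
 *
 *	unsigned char *buf;
 *
 *	buf = transport_kmap_first_data_page(cmd);
 *	buf[0] = dev->transport->get_device_type(dev);
 *	...
 *	transport_kunmap_first_data_page(cmd);
 *
 * The returned pointer already includes sg->offset, so callers index it
 * directly; the kunmap is performed on the page itself.
 */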
c66ac9db | 3967 | static int |
05d1c7c0 | 3968 | transport_generic_get_mem(struct se_cmd *cmd) |
c66ac9db | 3969 | { |
ec98f782 AG |
3970 | u32 length = cmd->data_length; |
3971 | unsigned int nents; | |
3972 | struct page *page; | |
3973 | int i = 0; | |
c66ac9db | 3974 | |
ec98f782 AG |
3975 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
3976 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); | |
3977 | if (!cmd->t_data_sg) | |
3978 | return -ENOMEM; | |
c66ac9db | 3979 | |
ec98f782 AG |
3980 | cmd->t_data_nents = nents; |
3981 | sg_init_table(cmd->t_data_sg, nents); | |
c66ac9db | 3982 | |
ec98f782 AG |
3983 | while (length) { |
3984 | u32 page_len = min_t(u32, length, PAGE_SIZE); | |
3985 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
3986 | if (!page) | |
3987 | goto out; | |
c66ac9db | 3988 | |
ec98f782 AG |
3989 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
3990 | length -= page_len; | |
3991 | i++; | |
c66ac9db | 3992 | } |
c66ac9db | 3993 | return 0; |
c66ac9db | 3994 | |
ec98f782 AG |
3995 | out: |
3996 | while (i >= 0) { | |
3997 | __free_page(sg_page(&cmd->t_data_sg[i])); | |
3998 | i--; | |
c66ac9db | 3999 | } |
ec98f782 AG |
4000 | kfree(cmd->t_data_sg); |
4001 | cmd->t_data_sg = NULL; | |
4002 | return -ENOMEM; | |
c66ac9db NB |
4003 | } |
4004 | ||
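/*
 * Editor's note (illustrative sketch, not part of the original source):
 * transport_generic_get_mem() backs cmd->t_data_sg with one zeroed page
 * per PAGE_SIZE chunk of the expected transfer length.  With 4 KiB pages:
 *
 *	data_length = 8192 -> nents = 2 (4096 + 4096)
 *	data_length = 6144 -> nents = 2 (4096 + 2048)
 *	data_length =  512 -> nents = 1 (512)
 *
 * If any page allocation fails, the partially built table is freed again
 * and -ENOMEM is returned to the caller.
 */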
a1d8b49a AG |
4005 | /* Reduce sectors if they are too long for the device */ |
4006 | static inline sector_t transport_limit_task_sectors( | |
c66ac9db NB |
4007 | struct se_device *dev, |
4008 | unsigned long long lba, | |
a1d8b49a | 4009 | sector_t sectors) |
c66ac9db | 4010 | { |
a1d8b49a | 4011 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db | 4012 | |
a1d8b49a AG |
4013 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
4014 | if ((lba + sectors) > transport_dev_end_lba(dev)) | |
4015 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
c66ac9db | 4016 | |
a1d8b49a | 4017 | return sectors; |
c66ac9db NB |
4018 | } |
4019 | ||
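/*
 * Editor's note (illustrative sketch, not part of the original source):
 * transport_limit_task_sectors() first clamps a request to the backend's
 * max_sectors attribute and then, for TYPE_DISK, truncates it at the end
 * of the device.  Assuming a hypothetical device with max_sectors == 1024
 * and transport_dev_end_lba() == 2048:
 *
 *	lba = 0,    sectors = 4096 -> clamped to 1024
 *	lba = 2000, sectors = 1024 -> 2000 + 1024 > 2048, truncated to
 *	                              (2048 - 2000) + 1 = 49
 *
 * so each struct se_task describes an in-bounds, backend-sized range.
 */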
c66ac9db NB |
4020 | |
4021 | /* | |
4022 | * This function can be used by HW target mode drivers to create a linked | |
4023 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
4024 | * This is intended to be called during the completion path by TCM Core | |
4026 | * when struct target_core_fabric_ops->task_sg_chaining is enabled. |
4026 | */ | |
4027 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
4028 | { | |
ec98f782 AG |
4029 | struct scatterlist *sg_first = NULL; |
4030 | struct scatterlist *sg_prev = NULL; | |
4031 | int sg_prev_nents = 0; | |
4032 | struct scatterlist *sg; | |
c66ac9db | 4033 | struct se_task *task; |
ec98f782 | 4034 | u32 chained_nents = 0; |
c66ac9db NB |
4035 | int i; |
4036 | ||
ec98f782 AG |
4037 | BUG_ON(!cmd->se_tfo->task_sg_chaining); |
4038 | ||
c66ac9db NB |
4039 | /* |
4040 | * Walk the struct se_task list and setup scatterlist chains | |
a1d8b49a | 4041 | * for each contiguously allocated struct se_task->task_sg[]. |
c66ac9db | 4042 | */ |
a1d8b49a | 4043 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
ec98f782 | 4044 | if (!task->task_sg) |
c66ac9db NB |
4045 | continue; |
4046 | ||
ec98f782 AG |
4047 | if (!sg_first) { |
4048 | sg_first = task->task_sg; | |
6708bb27 | 4049 | chained_nents = task->task_sg_nents; |
97868c89 | 4050 | } else { |
ec98f782 | 4051 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
6708bb27 | 4052 | chained_nents += task->task_sg_nents; |
97868c89 | 4053 | } |
c3c74c7a NB |
4054 | /* |
4055 | * For the padded tasks, use the extra SGL vector allocated | |
4056 | * in transport_allocate_data_tasks() for the sg_prev_nents | |
4057 | * offset into sg_chain() above. The last task of a |
4058 | * multi-task list, or a single task will not have |
4059 | * task->task_padded_sg set. |
4060 | */ | |
4061 | if (task->task_padded_sg) | |
4062 | sg_prev_nents = (task->task_sg_nents + 1); | |
4063 | else | |
4064 | sg_prev_nents = task->task_sg_nents; | |
ec98f782 AG |
4065 | |
4066 | sg_prev = task->task_sg; | |
c66ac9db NB |
4067 | } |
4068 | /* | |
4069 | * Setup the starting pointer and total t_tasks_sg_chained_no including |
4070 | * padding SGs for linking and to mark the end. | |
4071 | */ | |
a1d8b49a | 4072 | cmd->t_tasks_sg_chained = sg_first; |
ec98f782 | 4073 | cmd->t_tasks_sg_chained_no = chained_nents; |
c66ac9db | 4074 | |
6708bb27 | 4075 | pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
a1d8b49a AG |
4076 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, |
4077 | cmd->t_tasks_sg_chained_no); | |
c66ac9db | 4078 | |
a1d8b49a AG |
4079 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
4080 | cmd->t_tasks_sg_chained_no, i) { | |
c66ac9db | 4081 | |
6708bb27 | 4082 | pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", |
5951146d | 4083 | i, sg, sg_page(sg), sg->length, sg->offset); |
c66ac9db | 4084 | if (sg_is_chain(sg)) |
6708bb27 | 4085 | pr_debug("SG: %p sg_is_chain=1\n", sg); |
c66ac9db | 4086 | if (sg_is_last(sg)) |
6708bb27 | 4087 | pr_debug("SG: %p sg_is_last=1\n", sg); |
c66ac9db | 4088 | } |
c66ac9db NB |
4089 | } |
4090 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
4091 | ||
a1d8b49a AG |
4092 | /* |
4093 | * Break up cmd into chunks transport can handle | |
4094 | */ | |
ec98f782 | 4095 | static int transport_allocate_data_tasks( |
c66ac9db NB |
4096 | struct se_cmd *cmd, |
4097 | unsigned long long lba, | |
c66ac9db | 4098 | enum dma_data_direction data_direction, |
ec98f782 AG |
4099 | struct scatterlist *sgl, |
4100 | unsigned int sgl_nents) | |
c66ac9db NB |
4101 | { |
4102 | unsigned char *cdb = NULL; | |
4103 | struct se_task *task; | |
5951146d | 4104 | struct se_device *dev = cmd->se_dev; |
ec98f782 | 4105 | unsigned long flags; |
1d20bb61 | 4106 | int task_count, i, ret; |
277c5f27 | 4107 | sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
ec98f782 AG |
4108 | u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; |
4109 | struct scatterlist *sg; | |
4110 | struct scatterlist *cmd_sg; | |
a1d8b49a | 4111 | |
ec98f782 AG |
4112 | WARN_ON(cmd->data_length % sector_size); |
4113 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); | |
277c5f27 NB |
4114 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); |
4115 | ||
ec98f782 AG |
4116 | cmd_sg = sgl; |
4117 | for (i = 0; i < task_count; i++) { | |
c3c74c7a | 4118 | unsigned int task_size, task_sg_nents_padded; |
ec98f782 | 4119 | int count; |
a1d8b49a | 4120 | |
c66ac9db | 4121 | task = transport_generic_get_task(cmd, data_direction); |
a1d8b49a | 4122 | if (!task) |
ec98f782 | 4123 | return -ENOMEM; |
c66ac9db | 4124 | |
c66ac9db | 4125 | task->task_lba = lba; |
ec98f782 AG |
4126 | task->task_sectors = min(sectors, dev_max_sectors); |
4127 | task->task_size = task->task_sectors * sector_size; | |
c66ac9db | 4128 | |
e3d6f909 | 4129 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4130 | BUG_ON(!cdb); |
4131 | ||
4132 | memcpy(cdb, cmd->t_task_cdb, | |
4133 | scsi_command_size(cmd->t_task_cdb)); | |
4134 | ||
4135 | /* Update new cdb with updated lba/sectors */ | |
3a867205 | 4136 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
525a48a2 NB |
4137 | /* |
4138 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | |
4139 | * in order to calculate the number of per-task SGL entries |
4140 | */ | |
4141 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | |
c66ac9db | 4142 | /* |
ec98f782 AG |
4143 | * Check if the fabric module driver is requesting that all |
4144 | * struct se_task->task_sg[] be chained together. If so, |
4145 | * then allocate an extra padding SG entry for linking and | |
c3c74c7a NB |
4146 | * marking the end of the chained SGL for every task except |
4147 | * the last one for (task_count > 1) operation, or skipping | |
4148 | * the extra padding for the (task_count == 1) case. | |
c66ac9db | 4149 | */ |
c3c74c7a NB |
4150 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4151 | task_sg_nents_padded = (task->task_sg_nents + 1); | |
ec98f782 | 4152 | task->task_padded_sg = 1; |
c3c74c7a NB |
4153 | } else |
4154 | task_sg_nents_padded = task->task_sg_nents; | |
c66ac9db | 4155 | |
1d20bb61 | 4156 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
c3c74c7a | 4157 | task_sg_nents_padded, GFP_KERNEL); |
ec98f782 AG |
4158 | if (!task->task_sg) { |
4159 | cmd->se_dev->transport->free_task(task); | |
4160 | return -ENOMEM; | |
4161 | } | |
4162 | ||
c3c74c7a | 4163 | sg_init_table(task->task_sg, task_sg_nents_padded); |
c66ac9db | 4164 | |
ec98f782 AG |
4165 | task_size = task->task_size; |
4166 | ||
4167 | /* Build new sgl, only up to task_size */ | |
6708bb27 | 4168 | for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { |
ec98f782 AG |
4169 | if (cmd_sg->length > task_size) |
4170 | break; | |
4171 | ||
4172 | *sg = *cmd_sg; | |
4173 | task_size -= cmd_sg->length; | |
4174 | cmd_sg = sg_next(cmd_sg); | |
c66ac9db | 4175 | } |
c66ac9db | 4176 | |
ec98f782 AG |
4177 | lba += task->task_sectors; |
4178 | sectors -= task->task_sectors; | |
c66ac9db | 4179 | |
ec98f782 AG |
4180 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4181 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4182 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4183 | } |
1d20bb61 NB |
4184 | /* |
4185 | * Now perform the memory map of task->task_sg[] into backend | |
4186 | * subsystem memory.. | |
4187 | */ | |
4188 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
4189 | if (atomic_read(&task->task_sent)) | |
4190 | continue; | |
4191 | if (!dev->transport->map_data_SG) | |
4192 | continue; | |
4193 | ||
4194 | ret = dev->transport->map_data_SG(task); | |
4195 | if (ret < 0) | |
4196 | return 0; | |
4197 | } | |
c66ac9db | 4198 | |
ec98f782 | 4199 | return task_count; |
c66ac9db NB |
4200 | } |
4201 | ||
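/*
 * Editor's note (illustrative sketch, not part of the original source):
 * transport_allocate_data_tasks() splits a command into max_sectors sized
 * struct se_task pieces.  Assuming 512-byte blocks and max_sectors == 1024:
 *
 *	data_length = 1 MiB -> sectors = 2048
 *	task_count  = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2
 *	task 0: task_lba = lba,        task_sectors = 1024, task_size = 512 KiB
 *	task 1: task_lba = lba + 1024, task_sectors = 1024, task_size = 512 KiB
 *
 * Each task also gets its own CDB copy with the LBA/sector fields rewritten
 * by cmd->transport_split_cdb(), and a per-task SGL carved out of the
 * command's SGL in PAGE_SIZE chunks.
 */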
4202 | static int | |
ec98f782 | 4203 | transport_allocate_control_task(struct se_cmd *cmd) |
c66ac9db | 4204 | { |
5951146d | 4205 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4206 | unsigned char *cdb; |
4207 | struct se_task *task; | |
ec98f782 | 4208 | unsigned long flags; |
6708bb27 | 4209 | int ret = 0; |
c66ac9db NB |
4210 | |
4211 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
4212 | if (!task) | |
ec98f782 | 4213 | return -ENOMEM; |
c66ac9db | 4214 | |
e3d6f909 | 4215 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4216 | BUG_ON(!cdb); |
4217 | memcpy(cdb, cmd->t_task_cdb, | |
4218 | scsi_command_size(cmd->t_task_cdb)); | |
c66ac9db | 4219 | |
ec98f782 AG |
4220 | task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, |
4221 | GFP_KERNEL); | |
4222 | if (!task->task_sg) { | |
4223 | cmd->se_dev->transport->free_task(task); | |
4224 | return -ENOMEM; | |
4225 | } | |
4226 | ||
4227 | memcpy(task->task_sg, cmd->t_data_sg, | |
4228 | sizeof(struct scatterlist) * cmd->t_data_nents); | |
c66ac9db | 4229 | task->task_size = cmd->data_length; |
6708bb27 | 4230 | task->task_sg_nents = cmd->t_data_nents; |
c66ac9db | 4231 | |
ec98f782 AG |
4232 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4233 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4234 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4235 | |
4236 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | |
1d20bb61 NB |
4237 | if (dev->transport->map_control_SG) |
4238 | ret = dev->transport->map_control_SG(task); | |
c66ac9db NB |
4239 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
4240 | if (dev->transport->cdb_none) | |
6708bb27 | 4241 | ret = dev->transport->cdb_none(task); |
c66ac9db | 4242 | } else { |
6708bb27 | 4243 | pr_err("target: Unknown control cmd type!\n"); |
c66ac9db | 4244 | BUG(); |
ec98f782 | 4245 | } |
6708bb27 AG |
4246 | |
4247 | /* Success! Return number of tasks allocated */ | |
4248 | if (ret == 0) | |
4249 | return 1; | |
4250 | return ret; | |
ec98f782 AG |
4251 | } |
4252 | ||
4253 | static u32 transport_allocate_tasks( | |
4254 | struct se_cmd *cmd, | |
4255 | unsigned long long lba, | |
4256 | enum dma_data_direction data_direction, | |
4257 | struct scatterlist *sgl, | |
4258 | unsigned int sgl_nents) | |
4259 | { | |
01cde4d5 NB |
4260 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
4261 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
4262 | return -EINVAL; | |
4263 | ||
ec98f782 AG |
4264 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4265 | sgl, sgl_nents); | |
01cde4d5 | 4266 | } else |
6708bb27 AG |
4267 | return transport_allocate_control_task(cmd); |
4268 | ||
c66ac9db NB |
4269 | } |
4270 | ||
ec98f782 | 4271 | |
c66ac9db NB |
4272 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
4273 | * | |
4274 | * Allocate storage transport resources from a set of values predefined | |
4275 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | |
4276 | * Any non zero return here is treated as an "out of resource" op. |
4277 | */ | |
4278 | /* | |
4279 | * Generate struct se_task(s) and/or their payloads for this CDB. | |
4280 | */ | |
a1d8b49a | 4281 | int transport_generic_new_cmd(struct se_cmd *cmd) |
c66ac9db | 4282 | { |
c66ac9db NB |
4283 | int ret = 0; |
4284 | ||
4285 | /* | |
4286 | * Determine if the TCM fabric module has already allocated physical |
4287 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | |
ec98f782 | 4288 | * beforehand. |
c66ac9db | 4289 | */ |
ec98f782 AG |
4290 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
4291 | cmd->data_length) { | |
05d1c7c0 | 4292 | ret = transport_generic_get_mem(cmd); |
c66ac9db NB |
4293 | if (ret < 0) |
4294 | return ret; | |
4295 | } | |
1d20bb61 NB |
4296 | /* |
4297 | * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for | |
4298 | * control or data CDB types, and perform the map to backend subsystem | |
4299 | * code from SGL memory allocated here by transport_generic_get_mem(), or | |
4300 | * via pre-existing SGL memory setup explicitly by fabric module code with |
4301 | * transport_generic_map_mem_to_cmd(). | |
4302 | */ | |
c66ac9db NB |
4303 | ret = transport_new_cmd_obj(cmd); |
4304 | if (ret < 0) | |
4305 | return ret; | |
c66ac9db | 4306 | /* |
a1d8b49a | 4307 | * For WRITEs, let the fabric know its buffer is ready. |
c66ac9db NB |
4308 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
4309 | * will be added to the struct se_device execution queue after its WRITE | |
4310 | * data has arrived. (ie: It gets handled by the transport processing | |
4311 | * thread a second time) | |
4312 | */ | |
4313 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
4314 | transport_add_tasks_to_state_queue(cmd); | |
4315 | return transport_generic_write_pending(cmd); | |
4316 | } | |
4317 | /* | |
4318 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
4319 | * to the execution queue. | |
4320 | */ | |
4321 | transport_execute_tasks(cmd); | |
4322 | return 0; | |
4323 | } | |
a1d8b49a | 4324 | EXPORT_SYMBOL(transport_generic_new_cmd); |
c66ac9db NB |
4325 | |
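/*
 * Illustrative sketch only (not part of this file): how a fabric module's
 * submission path typically reacts to the WRITE vs. non-WRITE split made by
 * transport_generic_new_cmd() above.  fabric_demo_submit() is a hypothetical
 * name used purely for this example.
 */
static int fabric_demo_submit(struct se_cmd *cmd)
{
	int ret;

	/*
	 * For DMA_TO_DEVICE the call ends in ->write_pending() and the command
	 * is handed back to the transport once the WRITE payload has arrived;
	 * all other commands are placed directly onto the execution queue.
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret == -EAGAIN)
		return 0;	/* queue full: the core retries this later */
	if (ret < 0) {
		/* Mirror the error handling used by the processing thread. */
		cmd->transport_error_status = ret;
		transport_generic_request_failure(cmd, NULL, 0,
				(cmd->data_direction != DMA_TO_DEVICE));
	}
	return ret;
}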
4326 | /* transport_generic_process_write(): | |
4327 | * | |
4328 | * | |
4329 | */ | |
4330 | void transport_generic_process_write(struct se_cmd *cmd) | |
4331 | { | |
c66ac9db NB |
4332 | transport_execute_tasks(cmd); |
4333 | } | |
4334 | EXPORT_SYMBOL(transport_generic_process_write); | |
4335 | ||
07bde79a NB |
4336 | static int transport_write_pending_qf(struct se_cmd *cmd) |
4337 | { | |
4338 | return cmd->se_tfo->write_pending(cmd); | |
4339 | } | |
4340 | ||
c66ac9db NB |
4341 | /* transport_generic_write_pending(): |
4342 | * | |
4343 | * | |
4344 | */ | |
4345 | static int transport_generic_write_pending(struct se_cmd *cmd) | |
4346 | { | |
4347 | unsigned long flags; | |
4348 | int ret; | |
4349 | ||
a1d8b49a | 4350 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4351 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
a1d8b49a | 4352 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
07bde79a NB |
4353 | |
4354 | if (cmd->transport_qf_callback) { | |
4355 | ret = cmd->transport_qf_callback(cmd); | |
4356 | if (ret == -EAGAIN) | |
4357 | goto queue_full; | |
4358 | else if (ret < 0) | |
4359 | return ret; | |
4360 | ||
4361 | cmd->transport_qf_callback = NULL; | |
4362 | return 0; | |
4363 | } | |
05d1c7c0 | 4364 | |
c66ac9db NB |
4365 | /* |
4366 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
a1d8b49a | 4367 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
c66ac9db | 4368 | * can be called from HW target mode interrupt code. This is safe |
e3d6f909 | 4369 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
c66ac9db NB |
4370 | * because the se_cmd->se_lun pointer is not being cleared. |
4371 | */ | |
4372 | transport_cmd_check_stop(cmd, 1, 0); | |
4373 | ||
4374 | /* | |
4375 | * Call the fabric write_pending function here to let the | |
4376 | * frontend know that WRITE buffers are ready. | |
4377 | */ | |
e3d6f909 | 4378 | ret = cmd->se_tfo->write_pending(cmd); |
07bde79a NB |
4379 | if (ret == -EAGAIN) |
4380 | goto queue_full; | |
4381 | else if (ret < 0) | |
c66ac9db NB |
4382 | return ret; |
4383 | ||
4384 | return PYX_TRANSPORT_WRITE_PENDING; | |
07bde79a NB |
4385 | |
4386 | queue_full: | |
6708bb27 | 4387 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
07bde79a NB |
4388 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
4389 | transport_handle_queue_full(cmd, cmd->se_dev, | |
4390 | transport_write_pending_qf); | |
4391 | return ret; | |
c66ac9db NB |
4392 | } |
4393 | ||
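/*
 * Illustrative sketch only: the queue-full contract seen from a fabric
 * module.  When ->write_pending() returns -EAGAIN, the core above parks the
 * command in TRANSPORT_COMPLETE_QF_WP and retries it later through
 * transport_write_pending_qf().  fabric_demo_write_pending() is a
 * hypothetical callback name for this example.
 */
static int fabric_demo_write_pending(struct se_cmd *cmd)
{
	bool out_of_resources = false;	/* stand-in for a real fabric-side check */

	if (out_of_resources)
		return -EAGAIN;		/* core re-invokes this callback later */

	/*
	 * Otherwise signal the initiator that the WRITE buffers described by
	 * cmd->t_data_sg are ready to receive data (e.g. send an R2T/XFER_RDY).
	 */
	return 0;
}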
35462975 | 4394 | void transport_release_cmd(struct se_cmd *cmd) |
c66ac9db | 4395 | { |
e3d6f909 | 4396 | BUG_ON(!cmd->se_tfo); |
c66ac9db NB |
4397 | |
4398 | transport_free_se_cmd(cmd); | |
35462975 | 4399 | cmd->se_tfo->release_cmd(cmd); |
c66ac9db | 4400 | } |
35462975 | 4401 | EXPORT_SYMBOL(transport_release_cmd); |
c66ac9db NB |
4402 | |
4403 | /* transport_generic_free_cmd(): | |
4404 | * | |
4405 | * Called from processing frontend to release storage engine resources | |
4406 | */ | |
4407 | void transport_generic_free_cmd( | |
4408 | struct se_cmd *cmd, | |
4409 | int wait_for_tasks, | |
c66ac9db NB |
4410 | int session_reinstatement) |
4411 | { | |
5951146d | 4412 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) |
35462975 | 4413 | transport_release_cmd(cmd); |
c66ac9db NB |
4414 | else { |
4415 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | |
4416 | ||
e3d6f909 | 4417 | if (cmd->se_lun) { |
c66ac9db | 4418 | #if 0 |
6708bb27 | 4419 | pr_debug("cmd: %p ITT: 0x%08x contains" |
e3d6f909 AG |
4420 | " cmd->se_lun\n", cmd, |
4421 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db NB |
4422 | #endif |
4423 | transport_lun_remove_cmd(cmd); | |
4424 | } | |
4425 | ||
4426 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | |
4427 | cmd->transport_wait_for_tasks(cmd, 0, 0); | |
4428 | ||
f4366772 NB |
4429 | transport_free_dev_tasks(cmd); |
4430 | ||
35462975 | 4431 | transport_generic_remove(cmd, session_reinstatement); |
c66ac9db NB |
4432 | } |
4433 | } | |
4434 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
4435 | ||
4436 | static void transport_nop_wait_for_tasks( | |
4437 | struct se_cmd *cmd, | |
4438 | int remove_cmd, | |
4439 | int session_reinstatement) | |
4440 | { | |
4441 | return; | |
4442 | } | |
4443 | ||
4444 | /* transport_lun_wait_for_tasks(): | |
4445 | * | |
4446 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
4447 | * a struct se_lun to be successfully shut down. |
4448 | */ | |
4449 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
4450 | { | |
4451 | unsigned long flags; | |
4452 | int ret; | |
4453 | /* | |
4454 | * If the frontend has already requested this struct se_cmd to | |
4455 | * be stopped, we can safely ignore this struct se_cmd. | |
4456 | */ | |
a1d8b49a AG |
4457 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4458 | if (atomic_read(&cmd->t_transport_stop)) { | |
4459 | atomic_set(&cmd->transport_lun_stop, 0); | |
6708bb27 | 4460 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
e3d6f909 | 4461 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4462 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4463 | transport_cmd_check_stop(cmd, 1, 0); |
e3d6f909 | 4464 | return -EPERM; |
c66ac9db | 4465 | } |
a1d8b49a AG |
4466 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
4467 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4468 | |
5951146d | 4469 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
4470 | |
4471 | ret = transport_stop_tasks_for_cmd(cmd); | |
4472 | ||
6708bb27 AG |
4473 | pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" |
4474 | " %d\n", cmd, cmd->t_task_list_num, ret); | |
c66ac9db | 4475 | if (!ret) { |
6708bb27 | 4476 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
e3d6f909 | 4477 | cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4478 | wait_for_completion(&cmd->transport_lun_stop_comp); |
6708bb27 | 4479 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
e3d6f909 | 4480 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4481 | } |
5951146d | 4482 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4483 | |
4484 | return 0; | |
4485 | } | |
4486 | ||
c66ac9db NB |
4487 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
4488 | { | |
4489 | struct se_cmd *cmd = NULL; | |
4490 | unsigned long lun_flags, cmd_flags; | |
4491 | /* | |
4492 | * Do exception processing and return CHECK_CONDITION status to the | |
4493 | * Initiator Port. | |
4494 | */ | |
4495 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5951146d AG |
4496 | while (!list_empty(&lun->lun_cmd_list)) { |
4497 | cmd = list_first_entry(&lun->lun_cmd_list, | |
4498 | struct se_cmd, se_lun_node); | |
4499 | list_del(&cmd->se_lun_node); | |
4500 | ||
a1d8b49a | 4501 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
4502 | /* |
4503 | * This will notify iscsi_target_transport.c: | |
4504 | * transport_cmd_check_stop() that a LUN shutdown is in | |
4505 | * progress for the iscsi_cmd_t. | |
4506 | */ | |
a1d8b49a | 4507 | spin_lock(&cmd->t_state_lock); |
6708bb27 | 4508 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
c66ac9db | 4509 | "_lun_stop for ITT: 0x%08x\n", |
e3d6f909 AG |
4510 | cmd->se_lun->unpacked_lun, |
4511 | cmd->se_tfo->get_task_tag(cmd)); | |
a1d8b49a AG |
4512 | atomic_set(&cmd->transport_lun_stop, 1); |
4513 | spin_unlock(&cmd->t_state_lock); | |
c66ac9db NB |
4514 | |
4515 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4516 | ||
6708bb27 AG |
4517 | if (!cmd->se_lun) { |
4518 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", | |
e3d6f909 AG |
4519 | cmd->se_tfo->get_task_tag(cmd), |
4520 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db NB |
4521 | BUG(); |
4522 | } | |
4523 | /* | |
4524 | * If the Storage engine still owns the iscsi_cmd_t, determine | |
4525 | * and/or stop its context. | |
4526 | */ | |
6708bb27 | 4527 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
e3d6f909 AG |
4528 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
4529 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4530 | |
e3d6f909 | 4531 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
c66ac9db NB |
4532 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4533 | continue; | |
4534 | } | |
4535 | ||
6708bb27 | 4536 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
c66ac9db | 4537 | "_wait_for_tasks(): SUCCESS\n", |
e3d6f909 AG |
4538 | cmd->se_lun->unpacked_lun, |
4539 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4540 | |
a1d8b49a | 4541 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
6708bb27 | 4542 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 4543 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4544 | goto check_cond; |
4545 | } | |
a1d8b49a | 4546 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 4547 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 4548 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4549 | |
4550 | transport_free_dev_tasks(cmd); | |
4551 | /* | |
4552 | * The Storage engine stopped this struct se_cmd before it was | |
4553 | * sent to the fabric frontend for delivery back to the |
4554 | * Initiator Node. Return this SCSI CDB back with a |
4555 | * CHECK_CONDITION status. |
4556 | */ | |
4557 | check_cond: | |
4558 | transport_send_check_condition_and_sense(cmd, | |
4559 | TCM_NON_EXISTENT_LUN, 0); | |
4560 | /* | |
4561 | * If the fabric frontend is waiting for this iscsi_cmd_t to | |
4562 | * be released, notify the waiting thread now that LU has | |
4563 | * finished accessing it. | |
4564 | */ | |
a1d8b49a AG |
4565 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4566 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | |
6708bb27 | 4567 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
c66ac9db NB |
4568 | " struct se_cmd: %p ITT: 0x%08x\n", |
4569 | lun->unpacked_lun, | |
e3d6f909 | 4570 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4571 | |
a1d8b49a | 4572 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
4573 | cmd_flags); |
4574 | transport_cmd_check_stop(cmd, 1, 0); | |
a1d8b49a | 4575 | complete(&cmd->transport_lun_fe_stop_comp); |
c66ac9db NB |
4576 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4577 | continue; | |
4578 | } | |
6708bb27 | 4579 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
e3d6f909 | 4580 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4581 | |
a1d8b49a | 4582 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4583 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4584 | } | |
4585 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4586 | } | |
4587 | ||
4588 | static int transport_clear_lun_thread(void *p) | |
4589 | { | |
4590 | struct se_lun *lun = (struct se_lun *)p; | |
4591 | ||
4592 | __transport_clear_lun_from_sessions(lun); | |
4593 | complete(&lun->lun_shutdown_comp); | |
4594 | ||
4595 | return 0; | |
4596 | } | |
4597 | ||
4598 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
4599 | { | |
4600 | struct task_struct *kt; | |
4601 | ||
5951146d | 4602 | kt = kthread_run(transport_clear_lun_thread, lun, |
c66ac9db NB |
4603 | "tcm_cl_%u", lun->unpacked_lun); |
4604 | if (IS_ERR(kt)) { | |
6708bb27 | 4605 | pr_err("Unable to start clear_lun thread\n"); |
e3d6f909 | 4606 | return PTR_ERR(kt); |
c66ac9db NB |
4607 | } |
4608 | wait_for_completion(&lun->lun_shutdown_comp); | |
4609 | ||
4610 | return 0; | |
4611 | } | |
4612 | ||
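/*
 * Illustrative sketch only: a ConfigFS teardown path would quiesce every
 * outstanding command that references the LUN before releasing it.
 * demo_remove_lun() is a hypothetical name for this example.
 */
static void demo_remove_lun(struct se_lun *lun)
{
	/*
	 * Blocks until the tcm_cl_%u kthread has drained lun->lun_cmd_list:
	 * each affected se_cmd is either handed back to its frontend or
	 * completed with CHECK CONDITION / LOGICAL UNIT NOT SUPPORTED.
	 */
	transport_clear_lun_from_sessions(lun);

	/* Fabric/TPG specific release of the se_lun would follow here. */
}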
4613 | /* transport_generic_wait_for_tasks(): | |
4614 | * | |
4615 | * Called from frontend or passthrough context to wait for storage engine | |
4616 | * to pause and/or release frontend generated struct se_cmd. | |
4617 | */ | |
4618 | static void transport_generic_wait_for_tasks( | |
4619 | struct se_cmd *cmd, | |
4620 | int remove_cmd, | |
4621 | int session_reinstatement) | |
4622 | { | |
4623 | unsigned long flags; | |
4624 | ||
4625 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | |
4626 | return; | |
4627 | ||
a1d8b49a | 4628 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
4629 | /* |
4630 | * If we are already stopped due to an external event (ie: LUN shutdown) | |
4631 | * sleep until the connection can have the passed struct se_cmd back. | |
a1d8b49a | 4632 | * The cmd->transport_lun_fe_stop_comp will be completed by |
c66ac9db NB |
4633 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
4634 | * has completed its operation on the struct se_cmd. | |
4635 | */ | |
a1d8b49a | 4636 | if (atomic_read(&cmd->transport_lun_stop)) { |
c66ac9db | 4637 | |
6708bb27 | 4638 | pr_debug("wait_for_tasks: Stopping" |
e3d6f909 | 4639 | " wait_for_completion(&cmd->transport_lun_fe" |
c66ac9db | 4640 | "_stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4641 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4642 | /* |
4643 | * There is a special case for WRITES where a FE exception + | |
4644 | * LUN shutdown means ConfigFS context is still sleeping on | |
4645 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
4646 | * We go ahead and complete transport_lun_stop_comp just to be sure |
4647 | * here. | |
4648 | */ | |
a1d8b49a AG |
4649 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4650 | complete(&cmd->transport_lun_stop_comp); | |
4651 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | |
4652 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4653 | |
4654 | transport_all_task_dev_remove_state(cmd); | |
4655 | /* | |
4656 | * At this point, the frontend who was the originator of this | |
4657 | * struct se_cmd, now owns the structure and can be released through | |
4658 | * normal means below. | |
4659 | */ | |
6708bb27 | 4660 | pr_debug("wait_for_tasks: Stopped" |
e3d6f909 | 4661 | " wait_for_completion(&cmd->transport_lun_fe_" |
c66ac9db | 4662 | "stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4663 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4664 | |
a1d8b49a | 4665 | atomic_set(&cmd->transport_lun_stop, 0); |
c66ac9db | 4666 | } |
a1d8b49a AG |
4667 | if (!atomic_read(&cmd->t_transport_active) || |
4668 | atomic_read(&cmd->t_transport_aborted)) | |
c66ac9db NB |
4669 | goto remove; |
4670 | ||
a1d8b49a | 4671 | atomic_set(&cmd->t_transport_stop, 1); |
c66ac9db | 4672 | |
6708bb27 | 4673 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
c66ac9db | 4674 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
e3d6f909 AG |
4675 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
4676 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | |
c66ac9db NB |
4677 | cmd->deferred_t_state); |
4678 | ||
a1d8b49a | 4679 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4680 | |
5951146d | 4681 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db | 4682 | |
a1d8b49a | 4683 | wait_for_completion(&cmd->t_transport_stop_comp); |
c66ac9db | 4684 | |
a1d8b49a AG |
4685 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4686 | atomic_set(&cmd->t_transport_active, 0); | |
4687 | atomic_set(&cmd->t_transport_stop, 0); | |
c66ac9db | 4688 | |
6708bb27 | 4689 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" |
a1d8b49a | 4690 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
e3d6f909 | 4691 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4692 | remove: |
a1d8b49a | 4693 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4694 | if (!remove_cmd) |
4695 | return; | |
4696 | ||
35462975 | 4697 | transport_generic_free_cmd(cmd, 0, session_reinstatement); |
c66ac9db NB |
4698 | } |
4699 | ||
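/*
 * Handshake summary (for reference): transport_generic_wait_for_tasks() sets
 * cmd->t_transport_stop, wakes the device processing thread and sleeps on
 * cmd->t_transport_stop_comp.  That completion is signalled once the storage
 * engine has finished with the descriptor, after which the frontend again
 * owns the se_cmd and may release it via transport_generic_free_cmd().
 */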
4700 | static int transport_get_sense_codes( | |
4701 | struct se_cmd *cmd, | |
4702 | u8 *asc, | |
4703 | u8 *ascq) | |
4704 | { | |
4705 | *asc = cmd->scsi_asc; | |
4706 | *ascq = cmd->scsi_ascq; | |
4707 | ||
4708 | return 0; | |
4709 | } | |
4710 | ||
4711 | static int transport_set_sense_codes( | |
4712 | struct se_cmd *cmd, | |
4713 | u8 asc, | |
4714 | u8 ascq) | |
4715 | { | |
4716 | cmd->scsi_asc = asc; | |
4717 | cmd->scsi_ascq = ascq; | |
4718 | ||
4719 | return 0; | |
4720 | } | |
4721 | ||
4722 | int transport_send_check_condition_and_sense( | |
4723 | struct se_cmd *cmd, | |
4724 | u8 reason, | |
4725 | int from_transport) | |
4726 | { | |
4727 | unsigned char *buffer = cmd->sense_buffer; | |
4728 | unsigned long flags; | |
4729 | int offset; | |
4730 | u8 asc = 0, ascq = 0; | |
4731 | ||
a1d8b49a | 4732 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4733 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 4734 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4735 | return 0; |
4736 | } | |
4737 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
a1d8b49a | 4738 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4739 | |
4740 | if (!reason && from_transport) | |
4741 | goto after_reason; | |
4742 | ||
4743 | if (!from_transport) | |
4744 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
4745 | /* | |
4746 | * Data Segment and SenseLength of the fabric response PDU. | |
4747 | * | |
4748 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
4749 | * from include/scsi/scsi_cmnd.h | |
4750 | */ | |
e3d6f909 | 4751 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
4752 | TRANSPORT_SENSE_BUFFER); |
4753 | /* | |
4754 | * Actual SENSE DATA, see SPC-3 7.23.2; SPC_SENSE_KEY_OFFSET uses |
4755 | * SENSE KEY values from include/scsi/scsi.h | |
4756 | */ | |
4757 | switch (reason) { | |
4758 | case TCM_NON_EXISTENT_LUN: | |
eb39d340 NB |
4759 | /* CURRENT ERROR */ |
4760 | buffer[offset] = 0x70; | |
4761 | /* ILLEGAL REQUEST */ | |
4762 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4763 | /* LOGICAL UNIT NOT SUPPORTED */ | |
4764 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | |
4765 | break; | |
c66ac9db NB |
4766 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4767 | case TCM_SECTOR_COUNT_TOO_MANY: | |
4768 | /* CURRENT ERROR */ | |
4769 | buffer[offset] = 0x70; | |
4770 | /* ILLEGAL REQUEST */ | |
4771 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4772 | /* INVALID COMMAND OPERATION CODE */ | |
4773 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
4774 | break; | |
4775 | case TCM_UNKNOWN_MODE_PAGE: | |
4776 | /* CURRENT ERROR */ | |
4777 | buffer[offset] = 0x70; | |
4778 | /* ILLEGAL REQUEST */ | |
4779 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4780 | /* INVALID FIELD IN CDB */ | |
4781 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4782 | break; | |
4783 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
4784 | /* CURRENT ERROR */ | |
4785 | buffer[offset] = 0x70; | |
4786 | /* ABORTED COMMAND */ | |
4787 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4788 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
4789 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
4790 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
4791 | break; | |
4792 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
4793 | /* CURRENT ERROR */ | |
4794 | buffer[offset] = 0x70; | |
4795 | /* ABORTED COMMAND */ | |
4796 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4797 | /* WRITE ERROR */ | |
4798 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4799 | /* NOT ENOUGH UNSOLICITED DATA */ | |
4800 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
4801 | break; | |
4802 | case TCM_INVALID_CDB_FIELD: | |
4803 | /* CURRENT ERROR */ | |
4804 | buffer[offset] = 0x70; | |
4805 | /* ABORTED COMMAND */ | |
4806 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4807 | /* INVALID FIELD IN CDB */ | |
4808 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4809 | break; | |
4810 | case TCM_INVALID_PARAMETER_LIST: | |
4811 | /* CURRENT ERROR */ | |
4812 | buffer[offset] = 0x70; | |
4813 | /* ABORTED COMMAND */ | |
4814 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4815 | /* INVALID FIELD IN PARAMETER LIST */ | |
4816 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
4817 | break; | |
4818 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
4819 | /* CURRENT ERROR */ | |
4820 | buffer[offset] = 0x70; | |
4821 | /* ABORTED COMMAND */ | |
4822 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4823 | /* WRITE ERROR */ | |
4824 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4825 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
4826 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
4827 | break; | |
4828 | case TCM_SERVICE_CRC_ERROR: | |
4829 | /* CURRENT ERROR */ | |
4830 | buffer[offset] = 0x70; | |
4831 | /* ABORTED COMMAND */ | |
4832 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4833 | /* PROTOCOL SERVICE CRC ERROR */ | |
4834 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
4835 | /* N/A */ | |
4836 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
4837 | break; | |
4838 | case TCM_SNACK_REJECTED: | |
4839 | /* CURRENT ERROR */ | |
4840 | buffer[offset] = 0x70; | |
4841 | /* ABORTED COMMAND */ | |
4842 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4843 | /* READ ERROR */ | |
4844 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
4845 | /* FAILED RETRANSMISSION REQUEST */ | |
4846 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
4847 | break; | |
4848 | case TCM_WRITE_PROTECTED: | |
4849 | /* CURRENT ERROR */ | |
4850 | buffer[offset] = 0x70; | |
4851 | /* DATA PROTECT */ | |
4852 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
4853 | /* WRITE PROTECTED */ | |
4854 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
4855 | break; | |
4856 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
4857 | /* CURRENT ERROR */ | |
4858 | buffer[offset] = 0x70; | |
4859 | /* UNIT ATTENTION */ | |
4860 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
4861 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
4862 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4863 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4864 | break; | |
4865 | case TCM_CHECK_CONDITION_NOT_READY: | |
4866 | /* CURRENT ERROR */ | |
4867 | buffer[offset] = 0x70; | |
4868 | /* Not Ready */ | |
4869 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
4870 | transport_get_sense_codes(cmd, &asc, &ascq); | |
4871 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4872 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4873 | break; | |
4874 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
4875 | default: | |
4876 | /* CURRENT ERROR */ | |
4877 | buffer[offset] = 0x70; | |
4878 | /* ILLEGAL REQUEST */ | |
4879 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4880 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
4881 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
4882 | break; | |
4883 | } | |
4884 | /* | |
4885 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
4886 | */ | |
4887 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
4888 | /* | |
4889 | * Automatically padded, this value is encoded in the fabric's | |
4890 | * data_length response PDU containing the SCSI defined sense data. | |
4891 | */ | |
4892 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
4893 | ||
4894 | after_reason: | |
07bde79a | 4895 | return cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4896 | } |
4897 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
4898 | ||
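/*
 * Worked example (for reference): for reason == TCM_WRITE_PROTECTED the
 * routine above builds fixed-format sense data of
 *
 *   buffer[offset + 0]                    = 0x70          (current error)
 *   buffer[offset + SPC_SENSE_KEY_OFFSET] = DATA_PROTECT  (0x07)
 *   buffer[offset + SPC_ASC_KEY_OFFSET]   = 0x27          (WRITE PROTECTED)
 *
 * then sets cmd->scsi_status = SAM_STAT_CHECK_CONDITION and
 * cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset before queueing
 * the status back to the fabric via ->queue_status().
 */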
4899 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
4900 | { | |
4901 | int ret = 0; | |
4902 | ||
a1d8b49a | 4903 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
6708bb27 | 4904 | if (!send_status || |
c66ac9db NB |
4905 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
4906 | return 1; | |
4907 | #if 0 | |
6708bb27 | 4908 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
c66ac9db | 4909 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
a1d8b49a | 4910 | cmd->t_task_cdb[0], |
e3d6f909 | 4911 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4912 | #endif |
4913 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
e3d6f909 | 4914 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4915 | ret = 1; |
4916 | } | |
4917 | return ret; | |
4918 | } | |
4919 | EXPORT_SYMBOL(transport_check_aborted_status); | |
4920 | ||
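/*
 * Illustrative sketch only: a fabric response path would typically let
 * transport_check_aborted_status() emit a (possibly delayed) TASK_ABORTED
 * status before queueing normal status.  fabric_demo_queue_status() is a
 * hypothetical name used for this example.
 */
static void fabric_demo_queue_status(struct se_cmd *cmd)
{
	/* Returns non-zero when TASK_ABORTED status has been (or was) sent. */
	if (transport_check_aborted_status(cmd, 1))
		return;

	cmd->se_tfo->queue_status(cmd);
}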
4921 | void transport_send_task_abort(struct se_cmd *cmd) | |
4922 | { | |
4923 | /* | |
4924 | * If there are still expected incoming fabric WRITEs, we wait | |
4925 | * until they have completed before sending a TASK_ABORTED |
4926 | * response. This response with TASK_ABORTED status will be | |
4927 | * queued back to fabric module by transport_check_aborted_status(). | |
4928 | */ | |
4929 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
e3d6f909 | 4930 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
a1d8b49a | 4931 | atomic_inc(&cmd->t_transport_aborted); |
c66ac9db NB |
4932 | smp_mb__after_atomic_inc(); |
4933 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4934 | transport_new_cmd_failure(cmd); | |
4935 | return; | |
4936 | } | |
4937 | } | |
4938 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4939 | #if 0 | |
6708bb27 | 4940 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
a1d8b49a | 4941 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
e3d6f909 | 4942 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4943 | #endif |
e3d6f909 | 4944 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4945 | } |
4946 | ||
4947 | /* transport_generic_do_tmr(): | |
4948 | * | |
4949 | * | |
4950 | */ | |
4951 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
4952 | { | |
5951146d | 4953 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4954 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
4955 | int ret; | |
4956 | ||
4957 | switch (tmr->function) { | |
5c6cd613 | 4958 | case TMR_ABORT_TASK: |
c66ac9db NB |
4959 | tmr->response = TMR_FUNCTION_REJECTED; |
4960 | break; | |
5c6cd613 NB |
4961 | case TMR_ABORT_TASK_SET: |
4962 | case TMR_CLEAR_ACA: | |
4963 | case TMR_CLEAR_TASK_SET: | |
c66ac9db NB |
4964 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
4965 | break; | |
5c6cd613 | 4966 | case TMR_LUN_RESET: |
c66ac9db NB |
4967 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
4968 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
4969 | TMR_FUNCTION_REJECTED; | |
4970 | break; | |
5c6cd613 | 4971 | case TMR_TARGET_WARM_RESET: |
c66ac9db NB |
4972 | tmr->response = TMR_FUNCTION_REJECTED; |
4973 | break; | |
5c6cd613 | 4974 | case TMR_TARGET_COLD_RESET: |
c66ac9db NB |
4975 | tmr->response = TMR_FUNCTION_REJECTED; |
4976 | break; | |
c66ac9db | 4977 | default: |
6708bb27 | 4978 | pr_err("Unknown TMR function: 0x%02x.\n", |
c66ac9db NB |
4979 | tmr->function); |
4980 | tmr->response = TMR_FUNCTION_REJECTED; | |
4981 | break; | |
4982 | } | |
4983 | ||
4984 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
e3d6f909 | 4985 | cmd->se_tfo->queue_tm_rsp(cmd); |
c66ac9db NB |
4986 | |
4987 | transport_cmd_check_stop(cmd, 2, 0); | |
4988 | return 0; | |
4989 | } | |
4990 | ||
4991 | /* | |
4992 | * Called with spin_lock_irq(&dev->execute_task_lock); held | |
4993 | * | |
4994 | */ | |
4995 | static struct se_task * | |
4996 | transport_get_task_from_state_list(struct se_device *dev) | |
4997 | { | |
4998 | struct se_task *task; | |
4999 | ||
5000 | if (list_empty(&dev->state_task_list)) | |
5001 | return NULL; | |
5002 | ||
5003 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | |
5004 | break; | |
5005 | ||
5006 | list_del(&task->t_state_list); | |
5007 | atomic_set(&task->task_state_active, 0); | |
5008 | ||
5009 | return task; | |
5010 | } | |
5011 | ||
5012 | static void transport_processing_shutdown(struct se_device *dev) | |
5013 | { | |
5014 | struct se_cmd *cmd; | |
c66ac9db | 5015 | struct se_task *task; |
c66ac9db NB |
5016 | unsigned long flags; |
5017 | /* | |
5018 | * Empty the struct se_device's struct se_task state list. | |
5019 | */ | |
5020 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5021 | while ((task = transport_get_task_from_state_list(dev))) { | |
e3d6f909 | 5022 | if (!task->task_se_cmd) { |
6708bb27 | 5023 | pr_err("task->task_se_cmd is NULL!\n"); |
c66ac9db NB |
5024 | continue; |
5025 | } | |
e3d6f909 | 5026 | cmd = task->task_se_cmd; |
c66ac9db | 5027 | |
c66ac9db NB |
5028 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5029 | ||
a1d8b49a | 5030 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 5031 | |
6708bb27 AG |
5032 | pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," |
5033 | " i_state: %d, t_state/def_t_state:" | |
c66ac9db | 5034 | " %d/%d cdb: 0x%02x\n", cmd, task, |
6708bb27 AG |
5035 | cmd->se_tfo->get_task_tag(cmd), |
5036 | cmd->se_tfo->get_cmd_state(cmd), | |
c66ac9db | 5037 | cmd->t_state, cmd->deferred_t_state, |
a1d8b49a | 5038 | cmd->t_task_cdb[0]); |
6708bb27 | 5039 | pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" |
c66ac9db NB |
5040 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
5041 | " t_transport_stop: %d t_transport_sent: %d\n", | |
e3d6f909 | 5042 | cmd->se_tfo->get_task_tag(cmd), |
6708bb27 | 5043 | cmd->t_task_list_num, |
a1d8b49a AG |
5044 | atomic_read(&cmd->t_task_cdbs_left), |
5045 | atomic_read(&cmd->t_task_cdbs_sent), | |
5046 | atomic_read(&cmd->t_transport_active), | |
5047 | atomic_read(&cmd->t_transport_stop), | |
5048 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
5049 | |
5050 | if (atomic_read(&task->task_active)) { | |
5051 | atomic_set(&task->task_stop, 1); | |
5052 | spin_unlock_irqrestore( | |
a1d8b49a | 5053 | &cmd->t_state_lock, flags); |
c66ac9db | 5054 | |
6708bb27 | 5055 | pr_debug("Waiting for task: %p to shutdown for dev:" |
c66ac9db NB |
5056 | " %p\n", task, dev); |
5057 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 5058 | pr_debug("Completed task: %p shutdown for dev: %p\n", |
c66ac9db NB |
5059 | task, dev); |
5060 | ||
a1d8b49a AG |
5061 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5062 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
5063 | |
5064 | atomic_set(&task->task_active, 0); | |
5065 | atomic_set(&task->task_stop, 0); | |
52208ae3 NB |
5066 | } else { |
5067 | if (atomic_read(&task->task_execute_queue) != 0) | |
5068 | transport_remove_task_from_execute_queue(task, dev); | |
c66ac9db NB |
5069 | } |
5070 | __transport_stop_task_timer(task, &flags); | |
5071 | ||
6708bb27 | 5072 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
c66ac9db | 5073 | spin_unlock_irqrestore( |
a1d8b49a | 5074 | &cmd->t_state_lock, flags); |
c66ac9db | 5075 | |
6708bb27 | 5076 | pr_debug("Skipping task: %p, dev: %p for" |
c66ac9db | 5077 | " t_task_cdbs_ex_left: %d\n", task, dev, |
a1d8b49a | 5078 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
c66ac9db NB |
5079 | |
5080 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5081 | continue; | |
5082 | } | |
5083 | ||
a1d8b49a | 5084 | if (atomic_read(&cmd->t_transport_active)) { |
6708bb27 | 5085 | pr_debug("got t_transport_active = 1 for task: %p, dev:" |
c66ac9db NB |
5086 | " %p\n", task, dev); |
5087 | ||
a1d8b49a | 5088 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5089 | spin_unlock_irqrestore( |
a1d8b49a | 5090 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5091 | transport_send_check_condition_and_sense( |
5092 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | |
5093 | 0); | |
5094 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5095 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5096 | |
5097 | transport_lun_remove_cmd(cmd); | |
5098 | transport_cmd_check_stop(cmd, 1, 0); | |
5099 | } else { | |
5100 | spin_unlock_irqrestore( | |
a1d8b49a | 5101 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5102 | |
5103 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5104 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5105 | |
5106 | transport_lun_remove_cmd(cmd); | |
5107 | ||
5108 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5109 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
5110 | } |
5111 | ||
5112 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5113 | continue; | |
5114 | } | |
6708bb27 | 5115 | pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", |
c66ac9db NB |
5116 | task, dev); |
5117 | ||
a1d8b49a | 5118 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5119 | spin_unlock_irqrestore( |
a1d8b49a | 5120 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5121 | transport_send_check_condition_and_sense(cmd, |
5122 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5123 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5124 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5125 | |
5126 | transport_lun_remove_cmd(cmd); | |
5127 | transport_cmd_check_stop(cmd, 1, 0); | |
5128 | } else { | |
5129 | spin_unlock_irqrestore( | |
a1d8b49a | 5130 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5131 | |
5132 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5133 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5134 | transport_lun_remove_cmd(cmd); |
5135 | ||
5136 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5137 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
5138 | } |
5139 | ||
5140 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5141 | } | |
5142 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
5143 | /* | |
5144 | * Empty the struct se_device's struct se_cmd list. | |
5145 | */ | |
5951146d | 5146 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { |
c66ac9db | 5147 | |
6708bb27 | 5148 | pr_debug("From Device Queue: cmd: %p t_state: %d\n", |
5951146d | 5149 | cmd, cmd->t_state); |
c66ac9db | 5150 | |
a1d8b49a | 5151 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db NB |
5152 | transport_send_check_condition_and_sense(cmd, |
5153 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5154 | ||
5155 | transport_lun_remove_cmd(cmd); | |
5156 | transport_cmd_check_stop(cmd, 1, 0); | |
5157 | } else { | |
5158 | transport_lun_remove_cmd(cmd); | |
5159 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5160 | transport_generic_remove(cmd, 0); |
c66ac9db | 5161 | } |
c66ac9db | 5162 | } |
c66ac9db NB |
5163 | } |
5164 | ||
5165 | /* transport_processing_thread(): | |
5166 | * | |
5167 | * | |
5168 | */ | |
5169 | static int transport_processing_thread(void *param) | |
5170 | { | |
5951146d | 5171 | int ret; |
c66ac9db NB |
5172 | struct se_cmd *cmd; |
5173 | struct se_device *dev = (struct se_device *) param; | |
c66ac9db NB |
5174 | |
5175 | set_user_nice(current, -20); | |
5176 | ||
5177 | while (!kthread_should_stop()) { | |
e3d6f909 AG |
5178 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
5179 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | |
c66ac9db NB |
5180 | kthread_should_stop()); |
5181 | if (ret < 0) | |
5182 | goto out; | |
5183 | ||
5184 | spin_lock_irq(&dev->dev_status_lock); | |
5185 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | |
5186 | spin_unlock_irq(&dev->dev_status_lock); | |
5187 | transport_processing_shutdown(dev); | |
5188 | continue; | |
5189 | } | |
5190 | spin_unlock_irq(&dev->dev_status_lock); | |
5191 | ||
5192 | get_cmd: | |
5193 | __transport_execute_tasks(dev); | |
5194 | ||
5951146d AG |
5195 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
5196 | if (!cmd) | |
c66ac9db NB |
5197 | continue; |
5198 | ||
5951146d | 5199 | switch (cmd->t_state) { |
c66ac9db | 5200 | case TRANSPORT_NEW_CMD_MAP: |
6708bb27 AG |
5201 | if (!cmd->se_tfo->new_cmd_map) { |
5202 | pr_err("cmd->se_tfo->new_cmd_map is" | |
c66ac9db NB |
5203 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
5204 | BUG(); | |
5205 | } | |
e3d6f909 | 5206 | ret = cmd->se_tfo->new_cmd_map(cmd); |
c66ac9db NB |
5207 | if (ret < 0) { |
5208 | cmd->transport_error_status = ret; | |
5209 | transport_generic_request_failure(cmd, NULL, | |
5210 | 0, (cmd->data_direction != | |
5211 | DMA_TO_DEVICE)); | |
5212 | break; | |
5213 | } | |
5214 | /* Fall through */ | |
5215 | case TRANSPORT_NEW_CMD: | |
5216 | ret = transport_generic_new_cmd(cmd); | |
07bde79a NB |
5217 | if (ret == -EAGAIN) |
5218 | break; | |
5219 | else if (ret < 0) { | |
c66ac9db NB |
5220 | cmd->transport_error_status = ret; |
5221 | transport_generic_request_failure(cmd, NULL, | |
5222 | 0, (cmd->data_direction != | |
5223 | DMA_TO_DEVICE)); | |
5224 | } | |
5225 | break; | |
5226 | case TRANSPORT_PROCESS_WRITE: | |
5227 | transport_generic_process_write(cmd); | |
5228 | break; | |
5229 | case TRANSPORT_COMPLETE_OK: | |
5230 | transport_stop_all_task_timers(cmd); | |
5231 | transport_generic_complete_ok(cmd); | |
5232 | break; | |
5233 | case TRANSPORT_REMOVE: | |
35462975 | 5234 | transport_generic_remove(cmd, 0); |
c66ac9db | 5235 | break; |
f4366772 | 5236 | case TRANSPORT_FREE_CMD_INTR: |
35462975 | 5237 | transport_generic_free_cmd(cmd, 0, 0); |
f4366772 | 5238 | break; |
c66ac9db NB |
5239 | case TRANSPORT_PROCESS_TMR: |
5240 | transport_generic_do_tmr(cmd); | |
5241 | break; | |
5242 | case TRANSPORT_COMPLETE_FAILURE: | |
5243 | transport_generic_request_failure(cmd, NULL, 1, 1); | |
5244 | break; | |
5245 | case TRANSPORT_COMPLETE_TIMEOUT: | |
5246 | transport_stop_all_task_timers(cmd); | |
5247 | transport_generic_request_timeout(cmd); | |
5248 | break; | |
07bde79a NB |
5249 | case TRANSPORT_COMPLETE_QF_WP: |
5250 | transport_generic_write_pending(cmd); | |
5251 | break; | |
c66ac9db | 5252 | default: |
6708bb27 | 5253 | pr_err("Unknown t_state: %d deferred_t_state:" |
c66ac9db | 5254 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
5951146d | 5255 | " %u\n", cmd->t_state, cmd->deferred_t_state, |
e3d6f909 AG |
5256 | cmd->se_tfo->get_task_tag(cmd), |
5257 | cmd->se_tfo->get_cmd_state(cmd), | |
5258 | cmd->se_lun->unpacked_lun); | |
c66ac9db NB |
5259 | BUG(); |
5260 | } | |
5261 | ||
5262 | goto get_cmd; | |
5263 | } | |
5264 | ||
5265 | out: | |
5266 | transport_release_all_cmds(dev); | |
5267 | dev->process_thread = NULL; | |
5268 | return 0; | |
5269 | } |