/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

/* #define DEBUG_CDB_HANDLER */
#ifdef DEBUG_CDB_HANDLER
#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
#else
#define DEBUG_CDB_H(x...)
#endif

/* #define DEBUG_CMD_MAP */
#ifdef DEBUG_CMD_MAP
#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
#else
#define DEBUG_CMD_M(x...)
#endif

/* #define DEBUG_MEM_ALLOC */
#ifdef DEBUG_MEM_ALLOC
#define DEBUG_MEM(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM(x...)
#endif

/* #define DEBUG_MEM2_ALLOC */
#ifdef DEBUG_MEM2_ALLOC
#define DEBUG_MEM2(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM2(x...)
#endif

/* #define DEBUG_SG_CALC */
#ifdef DEBUG_SG_CALC
#define DEBUG_SC(x...) printk(KERN_INFO x)
#else
#define DEBUG_SC(x...)
#endif

/* #define DEBUG_SE_OBJ */
#ifdef DEBUG_SE_OBJ
#define DEBUG_SO(x...) printk(KERN_INFO x)
#else
#define DEBUG_SO(x...)
#endif

/* #define DEBUG_CMD_VOL */
#ifdef DEBUG_CMD_VOL
#define DEBUG_VOL(x...) printk(KERN_INFO x)
#else
#define DEBUG_VOL(x...)
#endif

/* #define DEBUG_CMD_STOP */
#ifdef DEBUG_CMD_STOP
#define DEBUG_CS(x...) printk(KERN_INFO x)
#else
#define DEBUG_CS(x...)
#endif

/* #define DEBUG_PASSTHROUGH */
#ifdef DEBUG_PASSTHROUGH
#define DEBUG_PT(x...) printk(KERN_INFO x)
#else
#define DEBUG_PT(x...)
#endif

/* #define DEBUG_TASK_STOP */
#ifdef DEBUG_TASK_STOP
#define DEBUG_TS(x...) printk(KERN_INFO x)
#else
#define DEBUG_TS(x...)
#endif

/* #define DEBUG_TRANSPORT_STOP */
#ifdef DEBUG_TRANSPORT_STOP
#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
#else
#define DEBUG_TRANSPORT_S(x...)
#endif

/* #define DEBUG_TASK_FAILURE */
#ifdef DEBUG_TASK_FAILURE
#define DEBUG_TF(x...) printk(KERN_INFO x)
#else
#define DEBUG_TF(x...)
#endif

/* #define DEBUG_DEV_OFFLINE */
#ifdef DEBUG_DEV_OFFLINE
#define DEBUG_DO(x...) printk(KERN_INFO x)
#else
#define DEBUG_DO(x...)
#endif

/* #define DEBUG_TASK_STATE */
#ifdef DEBUG_TASK_STATE
#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
#else
#define DEBUG_TSTATE(x...)
#endif

/* #define DEBUG_STATUS_THR */
#ifdef DEBUG_STATUS_THR
#define DEBUG_ST(x...) printk(KERN_INFO x)
#else
#define DEBUG_ST(x...)
#endif

/* #define DEBUG_TASK_TIMEOUT */
#ifdef DEBUG_TASK_TIMEOUT
#define DEBUG_TT(x...) printk(KERN_INFO x)
#else
#define DEBUG_TT(x...)
#endif

/* #define DEBUG_GENERIC_REQUEST_FAILURE */
#ifdef DEBUG_GENERIC_REQUEST_FAILURE
#define DEBUG_GRF(x...) printk(KERN_INFO x)
#else
#define DEBUG_GRF(x...)
#endif

/* #define DEBUG_SAM_TASK_ATTRS */
#ifdef DEBUG_SAM_TASK_ATTRS
#define DEBUG_STA(x...) printk(KERN_INFO x)
#else
#define DEBUG_STA(x...)
#endif
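
/*
 * Illustrative note (not part of the original source): each switch
 * above is a compile-time toggle. Uncommenting a #define turns the
 * matching macro into a printk(KERN_INFO ...) call; left commented
 * out, the macro expands to nothing and its call sites compile away:
 *
 *	#define DEBUG_CMD_STOP
 *	...
 *	DEBUG_CS("Stopping ITT: 0x%08x\n", tag);
 */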

struct se_global *se_global;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
		unsigned long long starting_lba, u32 sectors,
		enum dma_data_direction data_direction,
		struct list_head *mem_list, int set_counts);
static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
		u32 dma_size);
static int transport_generic_remove(struct se_cmd *cmd,
		int release_to_pool, int session_reinstatement);
static int transport_get_sectors(struct se_cmd *cmd);
static struct list_head *transport_init_se_mem_list(void);
static int transport_map_sg_to_mem(struct se_cmd *cmd,
		struct list_head *se_mem_list, void *in_mem,
		u32 *se_mem_cnt);
static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
		unsigned char *dst, struct list_head *se_mem_list);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int transport_emulate_control_cdb(struct se_task *task);

int init_se_global(void)
{
	struct se_global *global;

	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
	if (!(global)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
		return -1;
	}

	INIT_LIST_HEAD(&global->g_lu_gps_list);
	INIT_LIST_HEAD(&global->g_se_tpg_list);
	INIT_LIST_HEAD(&global->g_hba_list);
	INIT_LIST_HEAD(&global->g_se_dev_list);
	spin_lock_init(&global->g_device_lock);
	spin_lock_init(&global->hba_lock);
	spin_lock_init(&global->se_tpg_lock);
	spin_lock_init(&global->lu_gps_lock);
	spin_lock_init(&global->plugin_class_lock);

	se_cmd_cache = kmem_cache_create("se_cmd_cache",
		sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!(se_cmd_cache)) {
		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
		sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
		0, NULL);
	if (!(se_tmr_req_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
			" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
		sizeof(struct se_session), __alignof__(struct se_session),
		0, NULL);
	if (!(se_sess_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_session"
			" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
		sizeof(struct se_ua), __alignof__(struct se_ua),
		0, NULL);
	if (!(se_ua_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	se_mem_cache = kmem_cache_create("se_mem_cache",
		sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
	if (!(se_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
		sizeof(struct t10_pr_registration),
		__alignof__(struct t10_pr_registration), 0, NULL);
	if (!(t10_pr_reg_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
			" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
		sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
		0, NULL);
	if (!(t10_alua_lu_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
			" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
		sizeof(struct t10_alua_lu_gp_member),
		__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!(t10_alua_lu_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
			"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
		sizeof(struct t10_alua_tg_pt_gp),
		__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!(t10_alua_tg_pt_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
			"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
		"t10_alua_tg_pt_gp_mem_cache",
		sizeof(struct t10_alua_tg_pt_gp_member),
		__alignof__(struct t10_alua_tg_pt_gp_member),
		0, NULL);
	if (!(t10_alua_tg_pt_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
			"mem_cache failed\n");
		goto out;
	}

	se_global = global;

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (se_mem_cache)
		kmem_cache_destroy(se_mem_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);
	return -1;
}

void release_se_global(void)
{
	struct se_global *global;

	global = se_global;
	if (!(global))
		return;

	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(se_mem_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);

	se_global = NULL;
}

void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_stgt\n");

	return 0;
}

int transport_subsystem_check_init(void)
{
	if (se_global->g_sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins.
	 */
	if (transport_subsystem_reqmods() < 0)
		return -1;

	se_global->g_sub_api_initialized = 1;
	return 0;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!(se_sess)) {
		printk(KERN_ERR "Unable to allocate struct se_session from"
			" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	atomic_set(&se_sess->mib_ref_count, 0);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
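
/*
 * Illustrative sketch (not from the original source): a fabric module
 * typically pairs this allocation with the registration calls below
 * once its I_T nexus is established; se_tpg, se_nacl and fabric_ptr
 * are placeholder names here:
 *
 *	struct se_session *se_sess = transport_init_session();
 *
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 *	...
 *	transport_deregister_session(se_sess);
 */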

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session's.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;

	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session's.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!(se_tpg)) {
		transport_free_session(se_sess);
		return;
	}
	/*
	 * Wait for possible reference in drivers/target/target_core_mib.c:
	 * scsi_att_intr_port_seq_show()
	 */
	while (atomic_read(&se_sess->mib_ref_count) != 0)
		cpu_relax();

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
					se_tpg))) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_tpg_wait_for_mib_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
		TPG_TFO(se_tpg)->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	if (!T_TASK(cmd))
		return;

	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;
		if (!(dev))
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!(atomic_read(&task->task_state_active)))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
	}
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2
		 * handoff to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 */
			if (CMD_TFO(cmd)->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				CMD_TFO(cmd)->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}
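
/*
 * Illustrative note (not from the original source): the wrapper above
 * shows the common calling pattern. The transport_off levels are used
 * roughly as follows:
 *
 *	transport_cmd_check_stop(cmd, 0, t_state); // only set new t_state
 *	transport_cmd_check_stop(cmd, 1, 0);       // clear t_transport_active
 *	transport_cmd_check_stop(cmd, 2, 0);       // also drop dev/LUN state
 *
 * In every case a return value of 1 means the command was claimed by a
 * stop or shutdown path and the caller must not touch it further.
 */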

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = SE_LUN(cmd);
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_free_dev_tasks(cmd);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
		list_del(&cmd->se_lun_list);
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
#if 0
		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0, 0);
}

static int transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_req *qr;
	unsigned long flags;

	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
	if (!(qr)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_queue_req\n");
		return -1;
	}
	INIT_LIST_HEAD(&qr->qr_list);

	qr->cmd = (void *)cmd;
	qr->state = t_state;

	if (t_state) {
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&qr->qr_list, &qobj->qobj_list);
	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
	return 0;
}
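
/*
 * Illustrative sketch (not from the original source): the queue object
 * implements a simple producer/consumer handoff. The enqueue side above
 * bumps qobj->queue_cnt and wakes qobj->thread_wq; a consumer such as
 * transport_processing_thread() would block along the lines of:
 *
 *	wait_event_interruptible(qobj->thread_wq,
 *			atomic_read(&qobj->queue_cnt));
 *	qr = transport_get_qr_from_queue(qobj);
 *
 * before acting on the dequeued struct se_queue_req.
 */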

/*
 * Called with struct se_queue_obj->cmd_queue_lock held.
 */
static struct se_queue_req *
__transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr = NULL;

	if (list_empty(&qobj->qobj_list))
		return NULL;

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);

	return qr;
}

static struct se_queue_req *
transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return qr;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *q_cmd;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
		q_cmd = (struct se_cmd *)qr->cmd;
		if (q_cmd != cmd)
			continue;

		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		kfree(qr);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
	}
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		TASK_CMD(task)->transport_error_status =
			PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
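
/*
 * Illustrative sketch (not from the original source): a subsystem
 * plugin implementing SYNCHRONIZE_CACHE in its ->do_task() would report
 * the outcome through this helper. For a FILEIO-style backend this
 * might look like (the vfs_fsync() call and fd_dev->fd_file name are
 * assumptions, not taken from this file):
 *
 *	int ret = vfs_fsync(fd_dev->fd_file, 0);
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 */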

/* transport_complete_task():
 *
 * Called from interrupt and non-interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
		T_TASK(cmd)->t_task_cdb[0], dev);
#endif
	if (dev) {
		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
		atomic_inc(&dev->depth_left);
		atomic_inc(&SE_HBA(dev)->left_queue_depth);
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement T_TASK(cmd)->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&T_TASK(cmd)->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!(atomic_dec_and_test(
				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		if (!success)
			T_TASK(cmd)->t_tasks_failed = 1;

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	if (!success || T_TASK(cmd)->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once the struct se_task list
 * of a struct se_cmd is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
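
/*
 * Worked example (illustrative, not from the original source): for a
 * HEAD_OF_QUEUE command split into tasks T1, T2, T3, the task_prev
 * threading above yields
 *
 *	before:  execute_task_list: [A, B]
 *	after:   execute_task_list: [T1, T2, T3, A, B]
 *
 * i.e. T1 is inserted at the list head and each subsequent task is
 * chained directly behind its predecessor, so the whole command jumps
 * ahead of already-queued SIMPLE/ORDERED work.
 */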

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running without SAM Task Attribute
	 * emulation will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
			CMD_TFO(task->task_se_cmd)->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/* transport_get_task_from_execute_queue():
 *
 * Called with dev->execute_task_lock held.
 */
static struct se_task *
transport_get_task_from_execute_queue(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->execute_task_list))
		return NULL;

	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
		break;

	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);

	return task;
}

/* transport_remove_task_from_execute_queue():
 *
 *
 */
static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

/* transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd = NULL;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
			qr_list) {

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		list_del(&qr->qr_list);
		kfree(qr);
		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
				flags);

		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set.
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
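
/*
 * Worked example (illustrative, not from the original source): for an
 * INQUIRY EVPD 0x83 designator whose first two bytes are
 *
 *	page_83[0] = 0x61  (protocol identifier 0x60 = SAS,
 *	                    code set 0x01 = binary)
 *	page_83[1] = 0x93  (PIV bit 0x80 set, association 0x10 =
 *	                    target port, designator type 0x03 = NAA)
 *
 * the PIV check above passes and vpd->protocol_identifier is stored
 * as 0x60, which transport_dump_vpd_proto_id() reports as
 * "SAS Serial SCSI Protocol".
 */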

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0, len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0, len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
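
/*
 * Worked example (illustrative, not from the original source): for a
 * binary (code set 0x01) NAA designator (type 0x03) whose payload
 * starts at page_83[4] with bytes 0x60 0x01 0x40 ..., the loop above
 * builds the printable string
 *
 *	device_identifier = "3600140..."
 *
 * i.e. one leading nibble for the designator type followed by two hex
 * digits per payload byte, up to the designator length in page_83[3].
 */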

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", TRANSPORT(dev)->name,
		TRANSPORT(dev)->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = DEV_T10_WWN(dev);
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	printk("  Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			printk("%c", wwn->vendor[i]);
		else
			printk(" ");

	printk("  Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			printk("%c", wwn->model[i]);
		else
			printk(" ");

	printk("  Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			printk("%c", wwn->revision[i]);
		else
			printk(" ");

	printk("\n");

	device_type = TRANSPORT(dev)->get_device_type(dev);
	printk("  Type:   %s ", scsi_device_type(device_type));
	printk("  ANSI SCSI revision: %02x\n",
		TRANSPORT(dev)->get_device_rev(dev));
}
1589 | ||
1590 | struct se_device *transport_add_device_to_core_hba( | |
1591 | struct se_hba *hba, | |
1592 | struct se_subsystem_api *transport, | |
1593 | struct se_subsystem_dev *se_dev, | |
1594 | u32 device_flags, | |
1595 | void *transport_dev, | |
1596 | struct se_dev_limits *dev_limits, | |
1597 | const char *inquiry_prod, | |
1598 | const char *inquiry_rev) | |
1599 | { | |
1600 | int ret = 0, force_pt; | |
1601 | struct se_device *dev; | |
1602 | ||
1603 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
1604 | if (!(dev)) { | |
1605 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); | |
1606 | return NULL; | |
1607 | } | |
1608 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | |
1609 | if (!(dev->dev_queue_obj)) { | |
1610 | printk(KERN_ERR "Unable to allocate memory for" | |
1611 | " dev->dev_queue_obj\n"); | |
1612 | kfree(dev); | |
1613 | return NULL; | |
1614 | } | |
1615 | transport_init_queue_obj(dev->dev_queue_obj); | |
1616 | ||
1617 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | |
1618 | GFP_KERNEL); | |
1619 | if (!(dev->dev_status_queue_obj)) { | |
1620 | printk(KERN_ERR "Unable to allocate memory for" | |
1621 | " dev->dev_status_queue_obj\n"); | |
1622 | kfree(dev->dev_queue_obj); | |
1623 | kfree(dev); | |
1624 | return NULL; | |
1625 | } | |
1626 | transport_init_queue_obj(dev->dev_status_queue_obj); | |
1627 | ||
1628 | dev->dev_flags = device_flags; | |
1629 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
1630 | dev->dev_ptr = (void *) transport_dev; | |
1631 | dev->se_hba = hba; | |
1632 | dev->se_sub_dev = se_dev; | |
1633 | dev->transport = transport; | |
1634 | atomic_set(&dev->active_cmds, 0); | |
1635 | INIT_LIST_HEAD(&dev->dev_list); | |
1636 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1637 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1638 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1639 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1640 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1641 | INIT_LIST_HEAD(&dev->state_task_list); | |
1642 | spin_lock_init(&dev->execute_task_lock); | |
1643 | spin_lock_init(&dev->delayed_cmd_lock); | |
1644 | spin_lock_init(&dev->ordered_cmd_lock); | |
1645 | spin_lock_init(&dev->state_task_lock); | |
1646 | spin_lock_init(&dev->dev_alua_lock); | |
1647 | spin_lock_init(&dev->dev_reservation_lock); | |
1648 | spin_lock_init(&dev->dev_status_lock); | |
1649 | spin_lock_init(&dev->dev_status_thr_lock); | |
1650 | spin_lock_init(&dev->se_port_lock); | |
1651 | spin_lock_init(&dev->se_tmr_lock); | |
1652 | ||
1653 | dev->queue_depth = dev_limits->queue_depth; | |
1654 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1655 | atomic_set(&dev->dev_ordered_id, 0); | |
1656 | ||
1657 | se_dev_set_default_attribs(dev, dev_limits); | |
1658 | ||
1659 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1660 | dev->creation_time = get_jiffies_64(); | |
1661 | spin_lock_init(&dev->stats_lock); | |
1662 | ||
1663 | spin_lock(&hba->device_lock); | |
1664 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1665 | hba->dev_count++; | |
1666 | spin_unlock(&hba->device_lock); | |
1667 | /* | |
1668 | * Setup the SAM Task Attribute emulation for struct se_device | |
1669 | */ | |
1670 | core_setup_task_attr_emulation(dev); | |
1671 | /* | |
1672 | * Force PR and ALUA passthrough emulation with internal object use. | |
1673 | */ | |
1674 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1675 | /* | |
1676 | * Setup the Reservations infrastructure for struct se_device | |
1677 | */ | |
1678 | core_setup_reservations(dev, force_pt); | |
1679 | /* | |
1680 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | |
1681 | */ | |
1682 | if (core_setup_alua(dev, force_pt) < 0) { | |
| ret = -1; | |
1683 | goto out; | |
| } | |
1684 | ||
1685 | /* | |
1686 | * Startup the struct se_device processing thread | |
1687 | */ | |
1688 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
1689 | "LIO_%s", TRANSPORT(dev)->name); | |
1690 | if (IS_ERR(dev->process_thread)) { | |
1691 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | |
1692 | TRANSPORT(dev)->name); | |
| ret = -1; | |
1693 | goto out; | |
1694 | } | |
1695 | ||
1696 | /* | |
1697 | * Preload the initial INQUIRY const values if we are doing | |
1698 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1699 | * passthrough because this is being provided by the backend LLD. | |
1700 | * This is required so that transport_get_inquiry() copies these | |
1701 | * originals once back into DEV_T10_WWN(dev) for the virtual device | |
1702 | * setup. | |
1703 | */ | |
1704 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1705 | if (!(inquiry_prod) || !(inquiry_rev)) { | |
1706 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | |
1707 | " INQUIRY consts\n"); | |
| ret = -1; | |
1708 | goto out; | |
1709 | } | |
1710 | ||
1711 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | |
1712 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | |
1713 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | |
1714 | } | |
1715 | scsi_dump_inquiry(dev); | |
1716 | ||
1717 | out: | |
1718 | if (!ret) | |
1719 | return dev; | |
| /* The processing thread may be unstarted on the early failure paths */ | |
| if (!IS_ERR_OR_NULL(dev->process_thread)) | |
1720 | kthread_stop(dev->process_thread); | |
1721 | ||
1722 | spin_lock(&hba->device_lock); | |
1723 | list_del(&dev->dev_list); | |
1724 | hba->dev_count--; | |
1725 | spin_unlock(&hba->device_lock); | |
1726 | ||
1727 | se_release_vpd_for_dev(dev); | |
1728 | ||
1729 | kfree(dev->dev_status_queue_obj); | |
1730 | kfree(dev->dev_queue_obj); | |
1731 | kfree(dev); | |
1732 | ||
1733 | return NULL; | |
1734 | } | |
1735 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
1736 | ||
1737 | /* transport_generic_prepare_cdb(): | |
1738 | * | |
1739 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1740 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1741 | * The point of this is that since we are mapping iSCSI LUNs to | |
1742 | * SCSI Target IDs, a non-zero LUN in the CDB will throw the | |
1743 | * devices and HBAs for a loop. | |
1744 | */ | |
1745 | static inline void transport_generic_prepare_cdb( | |
1746 | unsigned char *cdb) | |
1747 | { | |
1748 | switch (cdb[0]) { | |
1749 | case READ_10: /* SBC - RDProtect */ | |
1750 | case READ_12: /* SBC - RDProtect */ | |
1751 | case READ_16: /* SBC - RDProtect */ | |
1752 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1753 | case VERIFY: /* SBC - VRProtect */ | |
1754 | case VERIFY_16: /* SBC - VRProtect */ | |
1755 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1756 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1757 | break; | |
1758 | default: | |
1759 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1760 | break; | |
1761 | } | |
1762 | } | |
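To see what the default case does, here is a small hedged example (values illustrative): a legacy SAM-2 initiator encoding LUN 2 in bits 7-5 of byte 1 gets that field cleared while the low five bits survive:

#include <assert.h>

int main(void)
{
	/* Byte 1 of a READ(6) CDB: LUN 2 (010b) in bits 7-5, LBA bits in 4-0 */
	unsigned char byte1 = (2 << 5) | 0x03;	/* 0x43 */

	byte1 &= 0x1f;		/* same mask as the default: case above */
	assert(byte1 == 0x03);	/* LUN field cleared, LBA bits preserved */
	return 0;
}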
1763 | ||
1764 | static struct se_task * | |
1765 | transport_generic_get_task(struct se_cmd *cmd, | |
1766 | enum dma_data_direction data_direction) | |
1767 | { | |
1768 | struct se_task *task; | |
1769 | struct se_device *dev = SE_DEV(cmd); | |
1770 | unsigned long flags; | |
1771 | ||
1772 | task = dev->transport->alloc_task(cmd); | |
1773 | if (!task) { | |
1774 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | |
1775 | return NULL; | |
1776 | } | |
1777 | ||
1778 | INIT_LIST_HEAD(&task->t_list); | |
1779 | INIT_LIST_HEAD(&task->t_execute_list); | |
1780 | INIT_LIST_HEAD(&task->t_state_list); | |
1781 | init_completion(&task->task_stop_comp); | |
1782 | task->task_no = T_TASK(cmd)->t_tasks_no++; | |
1783 | task->task_se_cmd = cmd; | |
1784 | task->se_dev = dev; | |
1785 | task->task_data_direction = data_direction; | |
1786 | ||
1787 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
1788 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | |
1789 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
1790 | ||
1791 | return task; | |
1792 | } | |
1793 | ||
1794 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1795 | ||
1796 | void transport_device_setup_cmd(struct se_cmd *cmd) | |
1797 | { | |
1798 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | |
1799 | } | |
1800 | EXPORT_SYMBOL(transport_device_setup_cmd); | |
1801 | ||
1802 | /* | |
1803 | * Used by fabric modules containing a local struct se_cmd within their | |
1804 | * fabric dependent per I/O descriptor. | |
1805 | */ | |
1806 | void transport_init_se_cmd( | |
1807 | struct se_cmd *cmd, | |
1808 | struct target_core_fabric_ops *tfo, | |
1809 | struct se_session *se_sess, | |
1810 | u32 data_length, | |
1811 | int data_direction, | |
1812 | int task_attr, | |
1813 | unsigned char *sense_buffer) | |
1814 | { | |
1815 | INIT_LIST_HEAD(&cmd->se_lun_list); | |
1816 | INIT_LIST_HEAD(&cmd->se_delayed_list); | |
1817 | INIT_LIST_HEAD(&cmd->se_ordered_list); | |
1818 | /* | |
1819 | * Setup t_task pointer to t_task_backstore | |
1820 | */ | |
1821 | cmd->t_task = &cmd->t_task_backstore; | |
1822 | ||
1823 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | |
1824 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | |
1825 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | |
1826 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | |
1827 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | |
1828 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | |
1829 | ||
1830 | cmd->se_tfo = tfo; | |
1831 | cmd->se_sess = se_sess; | |
1832 | cmd->data_length = data_length; | |
1833 | cmd->data_direction = data_direction; | |
1834 | cmd->sam_task_attr = task_attr; | |
1835 | cmd->sense_buffer = sense_buffer; | |
1836 | } | |
1837 | EXPORT_SYMBOL(transport_init_se_cmd); | |
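As a sketch of the usage described above, a hypothetical fabric module might embed struct se_cmd in its per-I/O descriptor and initialize it as shown; my_fabric_cmd, my_fabric_ops, and the DMA_FROM_DEVICE/TASK_ATTR_SIMPLE choices are illustrative assumptions, not driver code:

/* Hypothetical per-I/O descriptor for a fabric module (sketch only) */
struct my_fabric_cmd {
	unsigned char	sense_buf[TRANSPORT_SENSE_BUFFER];
	struct se_cmd	se_cmd;		/* embedded generic command */
	/* ... fabric-private state ... */
};

static void my_fabric_init_cmd(struct my_fabric_cmd *fc,
			       struct se_session *se_sess, u32 length)
{
	/* my_fabric_ops: this fabric's struct target_core_fabric_ops */
	transport_init_se_cmd(&fc->se_cmd, &my_fabric_ops, se_sess, length,
			      DMA_FROM_DEVICE, TASK_ATTR_SIMPLE,
			      &fc->sense_buf[0]);
}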
1838 | ||
1839 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1840 | { | |
1841 | /* | |
1842 | * Check if SAM Task Attribute emulation is enabled for this | |
1843 | * struct se_device storage object | |
1844 | */ | |
1845 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | |
1846 | return 0; | |
1847 | ||
1848 | if (cmd->sam_task_attr == TASK_ATTR_ACA) { | |
1849 | DEBUG_STA("SAM Task Attribute ACA" | |
1850 | " emulation is not supported\n"); | |
1851 | return -1; | |
1852 | } | |
1853 | /* | |
1854 | * Used to determine when ORDERED commands should go from | |
1855 | * Dormant to Active status. | |
1856 | */ | |
1857 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | |
1858 | smp_mb__after_atomic_inc(); | |
1859 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | |
1860 | cmd->se_ordered_id, cmd->sam_task_attr, | |
1861 | TRANSPORT(cmd->se_dev)->name); | |
1862 | return 0; | |
1863 | } | |
1864 | ||
1865 | void transport_free_se_cmd( | |
1866 | struct se_cmd *se_cmd) | |
1867 | { | |
1868 | if (se_cmd->se_tmr_req) | |
1869 | core_tmr_release_req(se_cmd->se_tmr_req); | |
1870 | /* | |
1871 | * Check and free any extended CDB buffer that was allocated | |
1872 | */ | |
1873 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | |
1874 | kfree(T_TASK(se_cmd)->t_task_cdb); | |
1875 | } | |
1876 | EXPORT_SYMBOL(transport_free_se_cmd); | |
1877 | ||
1878 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | |
1879 | ||
1880 | /* transport_generic_allocate_tasks(): | |
1881 | * | |
1882 | * Called from fabric RX Thread. | |
1883 | */ | |
1884 | int transport_generic_allocate_tasks( | |
1885 | struct se_cmd *cmd, | |
1886 | unsigned char *cdb) | |
1887 | { | |
1888 | int ret; | |
1889 | ||
1890 | transport_generic_prepare_cdb(cdb); | |
1891 | ||
1892 | /* | |
1893 | * This is needed for early exceptions. | |
1894 | */ | |
1895 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
1896 | ||
1897 | transport_device_setup_cmd(cmd); | |
1898 | /* | |
1899 | * Ensure that the received CDB does not exceed the max (252 + 8) bytes | |
1900 | * for VARIABLE_LENGTH_CMD | |
1901 | */ | |
1902 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
1903 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | |
1904 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | |
1905 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
1906 | return -1; | |
1907 | } | |
1908 | /* | |
1909 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1910 | * allocate the additional extended CDB buffer now. Otherwise | |
1911 | * point t_task_cdb at the embedded __t_task_cdb buffer. | |
1912 | */ | |
1913 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | |
1914 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
1915 | GFP_KERNEL); | |
1916 | if (!(T_TASK(cmd)->t_task_cdb)) { | |
1917 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | |
1918 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | |
1919 | scsi_command_size(cdb), | |
1920 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | |
1921 | return -1; | |
1922 | } | |
1923 | } else | |
1924 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | |
1925 | /* | |
1926 | * Copy the original CDB into T_TASK(cmd). | |
1927 | */ | |
1928 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | |
1929 | /* | |
1930 | * Setup the received CDB based on SCSI defined opcodes and | |
1931 | * perform unit attention, persistent reservations and ALUA | |
1932 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | |
1933 | * pointer is expected to be setup before we reach this point. | |
1934 | */ | |
1935 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1936 | if (ret < 0) | |
1937 | return ret; | |
1938 | /* | |
1939 | * Check for SAM Task Attribute Emulation | |
1940 | */ | |
1941 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1942 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1943 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1944 | return -2; | |
1945 | } | |
1946 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1947 | if (cmd->se_lun->lun_sep) | |
1948 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1949 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1950 | return 0; | |
1951 | } | |
1952 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
1953 | ||
1954 | /* | |
1955 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | |
1956 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | |
1957 | */ | |
1958 | int transport_generic_handle_cdb( | |
1959 | struct se_cmd *cmd) | |
1960 | { | |
1961 | if (!SE_LUN(cmd)) { | |
1962 | dump_stack(); | |
1963 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
1964 | return -1; | |
1965 | } | |
1966 | ||
1967 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | |
1968 | return 0; | |
1969 | } | |
1970 | EXPORT_SYMBOL(transport_generic_handle_cdb); | |
1971 | ||
1972 | /* | |
1973 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1974 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1975 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1976 | */ | |
1977 | int transport_generic_handle_cdb_map( | |
1978 | struct se_cmd *cmd) | |
1979 | { | |
1980 | if (!SE_LUN(cmd)) { | |
1981 | dump_stack(); | |
1982 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
1983 | return -1; | |
1984 | } | |
1985 | ||
1986 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
1987 | return 0; | |
1988 | } | |
1989 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1990 | ||
1991 | /* transport_generic_handle_data(): | |
1992 | * | |
1993 | * | |
1994 | */ | |
1995 | int transport_generic_handle_data( | |
1996 | struct se_cmd *cmd) | |
1997 | { | |
1998 | /* | |
1999 | * For the software fabric case, we assume the nexus is being | |
2000 | * failed/shutdown when signals are pending from the kthread context | |
2001 | * caller, so we return a failure. For the HW target mode case running | |
2002 | * in interrupt code, the signal_pending() check is skipped. | |
2003 | */ | |
2004 | if (!in_interrupt() && signal_pending(current)) | |
2005 | return -1; | |
2006 | /* | |
2007 | * If the received CDB has already been ABORTED by the generic | |
2008 | * target engine, we now call transport_check_aborted_status() | |
2009 | * to queue any delayed TASK_ABORTED status for the received CDB to the | |
2010 | * fabric module as we are expecting no further incoming DATA OUT | |
2011 | * sequences at this point. | |
2012 | */ | |
2013 | if (transport_check_aborted_status(cmd, 1) != 0) | |
2014 | return 0; | |
2015 | ||
2016 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
2017 | return 0; | |
2018 | } | |
2019 | EXPORT_SYMBOL(transport_generic_handle_data); | |
2020 | ||
2021 | /* transport_generic_handle_tmr(): | |
2022 | * | |
2023 | * | |
2024 | */ | |
2025 | int transport_generic_handle_tmr( | |
2026 | struct se_cmd *cmd) | |
2027 | { | |
2028 | /* | |
2029 | * This is needed for early exceptions. | |
2030 | */ | |
2031 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
2032 | transport_device_setup_cmd(cmd); | |
2033 | ||
2034 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | |
2035 | return 0; | |
2036 | } | |
2037 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
2038 | ||
2039 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |
2040 | { | |
2041 | struct se_task *task, *task_tmp; | |
2042 | unsigned long flags; | |
2043 | int ret = 0; | |
2044 | ||
2045 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | |
2046 | CMD_TFO(cmd)->get_task_tag(cmd)); | |
2047 | ||
2048 | /* | |
2049 | * No tasks remain in the execution queue | |
2050 | */ | |
2051 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2052 | list_for_each_entry_safe(task, task_tmp, | |
2053 | &T_TASK(cmd)->t_task_list, t_list) { | |
2054 | DEBUG_TS("task_no[%d] - Processing task %p\n", | |
2055 | task->task_no, task); | |
2056 | /* | |
2057 | * If the struct se_task has not been sent and is not active, | |
2058 | * remove the struct se_task from the execution queue. | |
2059 | */ | |
2060 | if (!atomic_read(&task->task_sent) && | |
2061 | !atomic_read(&task->task_active)) { | |
2062 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
2063 | flags); | |
2064 | transport_remove_task_from_execute_queue(task, | |
2065 | task->se_dev); | |
2066 | ||
2067 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | |
2068 | task->task_no); | |
2069 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2070 | continue; | |
2071 | } | |
2072 | ||
2073 | /* | |
2074 | * If the struct se_task is active, sleep until it is returned | |
2075 | * from the plugin. | |
2076 | */ | |
2077 | if (atomic_read(&task->task_active)) { | |
2078 | atomic_set(&task->task_stop, 1); | |
2079 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
2080 | flags); | |
2081 | ||
2082 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | |
2083 | task->task_no); | |
2084 | wait_for_completion(&task->task_stop_comp); | |
2085 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | |
2086 | task->task_no); | |
2087 | ||
2088 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2089 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | |
2090 | ||
2091 | atomic_set(&task->task_active, 0); | |
2092 | atomic_set(&task->task_stop, 0); | |
2093 | } else { | |
2094 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | |
2095 | ret++; | |
2096 | } | |
2097 | ||
2098 | __transport_stop_task_timer(task, &flags); | |
2099 | } | |
2100 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2101 | ||
2102 | return ret; | |
2103 | } | |
2104 | ||
2105 | static void transport_failure_reset_queue_depth(struct se_device *dev) | |
2106 | { | |
2107 | unsigned long flags; | |
2108 | ||
2109 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | |
2110 | atomic_inc(&dev->depth_left); | |
2111 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | |
2112 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2113 | } | |
2114 | ||
2115 | /* | |
2116 | * Handle SAM-esque emulation for generic transport request failures. | |
2117 | */ | |
2118 | static void transport_generic_request_failure( | |
2119 | struct se_cmd *cmd, | |
2120 | struct se_device *dev, | |
2121 | int complete, | |
2122 | int sc) | |
2123 | { | |
2124 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | |
2125 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | |
2126 | T_TASK(cmd)->t_task_cdb[0]); | |
2127 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | |
2128 | " %d/%d transport_error_status: %d\n", | |
2129 | CMD_TFO(cmd)->get_cmd_state(cmd), | |
2130 | cmd->t_state, cmd->deferred_t_state, | |
2131 | cmd->transport_error_status); | |
2132 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | |
2133 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | |
2134 | " t_transport_active: %d t_transport_stop: %d" | |
2135 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | |
2136 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | |
2137 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | |
2138 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | |
2139 | atomic_read(&T_TASK(cmd)->t_transport_active), | |
2140 | atomic_read(&T_TASK(cmd)->t_transport_stop), | |
2141 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | |
2142 | ||
2143 | transport_stop_all_task_timers(cmd); | |
2144 | ||
2145 | if (dev) | |
2146 | transport_failure_reset_queue_depth(dev); | |
2147 | /* | |
2148 | * For SAM Task Attribute emulation for failed struct se_cmd | |
2149 | */ | |
2150 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
2151 | transport_complete_task_attr(cmd); | |
2152 | ||
2153 | if (complete) { | |
2154 | transport_direct_request_timeout(cmd); | |
2155 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2156 | } | |
2157 | ||
2158 | switch (cmd->transport_error_status) { | |
2159 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
2160 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2161 | break; | |
2162 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
2163 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
2164 | break; | |
2165 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
2166 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
2167 | break; | |
2168 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
2169 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
2170 | break; | |
2171 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
2172 | if (!sc) | |
2173 | transport_new_cmd_failure(cmd); | |
2174 | /* | |
2175 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
2176 | * we force this session to fall back to session | |
2177 | * recovery. | |
2178 | */ | |
2179 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | |
2180 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | |
2181 | ||
2182 | goto check_stop; | |
2183 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
2184 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
2185 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
2186 | break; | |
2187 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
2188 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
2189 | break; | |
2190 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
2191 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
2192 | break; | |
2193 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
2194 | /* | |
2195 | * No SENSE Data payload for this case, set SCSI Status | |
2196 | * and queue the response to $FABRIC_MOD. | |
2197 | * | |
2198 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
2199 | */ | |
2200 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2201 | /* | |
2202 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2203 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2204 | * CONFLICT STATUS. | |
2205 | * | |
2206 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2207 | */ | |
2208 | if (SE_SESS(cmd) && | |
2209 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | |
2210 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | |
2211 | cmd->orig_fe_lun, 0x2C, | |
2212 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
2213 | ||
2214 | CMD_TFO(cmd)->queue_status(cmd); | |
2215 | goto check_stop; | |
2216 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
2217 | /* | |
2218 | * struct se_cmd->scsi_sense_reason already set | |
2219 | */ | |
2220 | break; | |
2221 | default: | |
2222 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | |
2223 | T_TASK(cmd)->t_task_cdb[0], | |
2224 | cmd->transport_error_status); | |
2225 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2226 | break; | |
2227 | } | |
2228 | ||
2229 | if (!sc) | |
2230 | transport_new_cmd_failure(cmd); | |
2231 | else | |
2232 | transport_send_check_condition_and_sense(cmd, | |
2233 | cmd->scsi_sense_reason, 0); | |
2234 | check_stop: | |
2235 | transport_lun_remove_cmd(cmd); | |
2236 | transport_cmd_check_stop_to_fabric(cmd); | |
2238 | } | |
2239 | ||
2240 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2241 | { | |
2242 | unsigned long flags; | |
2243 | ||
2244 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2245 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | |
2246 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2247 | return; | |
2248 | } | |
2249 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | |
2250 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2251 | return; | |
2252 | } | |
2253 | ||
2254 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | |
2255 | &T_TASK(cmd)->t_se_count); | |
2256 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2257 | } | |
2258 | ||
2259 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2260 | { | |
2261 | unsigned long flags; | |
2262 | ||
2263 | /* | |
2264 | * Reset T_TASK(cmd)->t_se_count so that the last call to | |
2265 | * transport_generic_remove() can free memory resources. | |
2266 | */ | |
2267 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2268 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | |
2269 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | |
2270 | ||
2271 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | |
2272 | } | |
2273 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2274 | ||
2275 | transport_generic_remove(cmd, 0, 0); | |
2276 | } | |
2277 | ||
2278 | static int | |
2279 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | |
2280 | { | |
2281 | unsigned char *buf; | |
2282 | ||
2283 | buf = kzalloc(data_length, GFP_KERNEL); | |
2284 | if (!(buf)) { | |
2285 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | |
2286 | return -1; | |
2287 | } | |
2288 | ||
2289 | T_TASK(cmd)->t_tasks_se_num = 0; | |
2290 | T_TASK(cmd)->t_task_buf = buf; | |
2291 | ||
2292 | return 0; | |
2293 | } | |
2294 | ||
2295 | static inline u32 transport_lba_21(unsigned char *cdb) | |
2296 | { | |
2297 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2298 | } | |
2299 | ||
2300 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2301 | { | |
2302 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2303 | } | |
2304 | ||
2305 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2306 | { | |
2307 | unsigned int __v1, __v2; | |
2308 | ||
2309 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2310 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2311 | ||
2312 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2313 | } | |
2314 | ||
2315 | /* | |
2316 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2317 | */ | |
2318 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2319 | { | |
2320 | unsigned int __v1, __v2; | |
2321 | ||
2322 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2323 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2324 | ||
2325 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2326 | } | |
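A quick userspace check of the 64-bit assembly in transport_lba_64() above (sample CDB bytes are arbitrary); bytes 2-5 form the upper 32 bits and bytes 6-9 the lower:

#include <assert.h>

static unsigned long long lba_64(const unsigned char *cdb)
{
	unsigned int v1, v2;

	v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
	return ((unsigned long long)v2) | (unsigned long long)v1 << 32;
}

int main(void)
{
	/* READ_16-style CDB with LBA 0x0000000100000002 in bytes 2-9 */
	unsigned char cdb[16] = { 0 };

	cdb[5] = 0x01;	/* upper word = 0x00000001 */
	cdb[9] = 0x02;	/* lower word = 0x00000002 */
	assert(lba_64(cdb) == 0x0000000100000002ULL);
	return 0;
}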
2327 | ||
2328 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2329 | { | |
2330 | unsigned long flags; | |
2331 | ||
2332 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | |
2333 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | |
2334 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | |
2335 | } | |
2336 | ||
2337 | /* | |
2338 | * Called from interrupt context. | |
2339 | */ | |
2340 | static void transport_task_timeout_handler(unsigned long data) | |
2341 | { | |
2342 | struct se_task *task = (struct se_task *)data; | |
2343 | struct se_cmd *cmd = TASK_CMD(task); | |
2344 | unsigned long flags; | |
2345 | ||
2346 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | |
2347 | ||
2348 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2349 | if (task->task_flags & TF_STOP) { | |
2350 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2351 | return; | |
2352 | } | |
2353 | task->task_flags &= ~TF_RUNNING; | |
2354 | ||
2355 | /* | |
2356 | * Determine if transport_complete_task() has already been called. | |
2357 | */ | |
2358 | if (!(atomic_read(&task->task_active))) { | |
2359 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | |
2360 | " == 0\n", task, cmd); | |
2361 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2362 | return; | |
2363 | } | |
2364 | ||
2365 | atomic_inc(&T_TASK(cmd)->t_se_count); | |
2366 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | |
2367 | T_TASK(cmd)->t_tasks_failed = 1; | |
2368 | ||
2369 | atomic_set(&task->task_timeout, 1); | |
2370 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2371 | task->task_scsi_status = 1; | |
2372 | ||
2373 | if (atomic_read(&task->task_stop)) { | |
2374 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | |
2375 | " == 1\n", task, cmd); | |
2376 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2377 | complete(&task->task_stop_comp); | |
2378 | return; | |
2379 | } | |
2380 | ||
2381 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | |
2382 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | |
2383 | " t_task_cdbs_left\n", task, cmd); | |
2384 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2385 | return; | |
2386 | } | |
2387 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | |
2388 | task, cmd); | |
2389 | ||
2390 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
2391 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2392 | ||
2393 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2394 | } | |
2395 | ||
2396 | /* | |
2397 | * Called with T_TASK(cmd)->t_state_lock held. | |
2398 | */ | |
2399 | static void transport_start_task_timer(struct se_task *task) | |
2400 | { | |
2401 | struct se_device *dev = task->se_dev; | |
2402 | int timeout; | |
2403 | ||
2404 | if (task->task_flags & TF_RUNNING) | |
2405 | return; | |
2406 | /* | |
2407 | * If the task_timeout is disabled, exit now. | |
2408 | */ | |
2409 | timeout = DEV_ATTRIB(dev)->task_timeout; | |
2410 | if (!(timeout)) | |
2411 | return; | |
2412 | ||
2413 | init_timer(&task->task_timer); | |
2414 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2415 | task->task_timer.data = (unsigned long) task; | |
2416 | task->task_timer.function = transport_task_timeout_handler; | |
2417 | ||
2418 | task->task_flags |= TF_RUNNING; | |
2419 | add_timer(&task->task_timer); | |
2420 | #if 0 | |
2421 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | |
2422 | " %d\n", task->task_se_cmd, task, timeout); | |
2423 | #endif | |
2424 | } | |
2425 | ||
2426 | /* | |
2427 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | |
2428 | */ | |
2429 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2430 | { | |
2431 | struct se_cmd *cmd = TASK_CMD(task); | |
2432 | ||
2433 | if (!(task->task_flags & TF_RUNNING)) | |
2434 | return; | |
2435 | ||
2436 | task->task_flags |= TF_STOP; | |
2437 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | |
2438 | ||
2439 | del_timer_sync(&task->task_timer); | |
2440 | ||
2441 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | |
2442 | task->task_flags &= ~TF_RUNNING; | |
2443 | task->task_flags &= ~TF_STOP; | |
2444 | } | |
2445 | ||
2446 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2447 | { | |
2448 | struct se_task *task = NULL, *task_tmp; | |
2449 | unsigned long flags; | |
2450 | ||
2451 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2452 | list_for_each_entry_safe(task, task_tmp, | |
2453 | &T_TASK(cmd)->t_task_list, t_list) | |
2454 | __transport_stop_task_timer(task, &flags); | |
2455 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2456 | } | |
2457 | ||
2458 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2459 | { | |
2460 | if (dev->dev_tcq_window_closed++ < | |
2461 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2462 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2463 | } else | |
2464 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2465 | ||
2466 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | |
2467 | return 0; | |
2468 | } | |
2469 | ||
2470 | /* | |
2471 | * Called in Fabric Module context from transport_execute_tasks() | |
2472 | * | |
2473 | * The return of this function determines if the tasks from struct se_cmd | |
2474 | * get added to the execution queue in transport_execute_tasks(), | |
2475 | * or are added to the delayed or ordered lists here. | |
2476 | */ | |
2477 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2478 | { | |
2479 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | |
2480 | return 1; | |
2481 | /* | |
2482 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | |
2483 | * to allow the passed struct se_cmd's tasks to be added to the front of the list. | |
2484 | */ | |
2485 | if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | |
2486 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | |
2487 | smp_mb__after_atomic_inc(); | |
2488 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | |
2489 | " 0x%02x, se_ordered_id: %u\n", | |
2490 | T_TASK(cmd)->t_task_cdb[0], | |
2491 | cmd->se_ordered_id); | |
2492 | return 1; | |
2493 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | |
2494 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | |
2495 | list_add_tail(&cmd->se_ordered_list, | |
2496 | &SE_DEV(cmd)->ordered_cmd_list); | |
2497 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | |
2498 | ||
2499 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | |
2500 | smp_mb__after_atomic_inc(); | |
2501 | ||
2502 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | |
2503 | " list, se_ordered_id: %u\n", | |
2504 | T_TASK(cmd)->t_task_cdb[0], | |
2505 | cmd->se_ordered_id); | |
2506 | /* | |
2507 | * Add ORDERED command to tail of execution queue if | |
2508 | * no other older commands exist that need to be | |
2509 | * completed first. | |
2510 | */ | |
2511 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | |
2512 | return 1; | |
2513 | } else { | |
2514 | /* | |
2515 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2516 | */ | |
2517 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | |
2518 | smp_mb__after_atomic_inc(); | |
2519 | } | |
2520 | /* | |
2521 | * Otherwise, if one or more outstanding ORDERED task attributes exist, | |
2522 | * the dormant task(s) built for the passed struct se_cmd are held on the | |
2523 | * delayed list rather than going Active on this struct se_device. | |
2524 | */ | |
2525 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | |
2526 | /* | |
2527 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
2528 | * will be drained upon completion of the HEAD_OF_QUEUE task. | |
2529 | */ | |
2530 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | |
2531 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | |
2532 | list_add_tail(&cmd->se_delayed_list, | |
2533 | &SE_DEV(cmd)->delayed_cmd_list); | |
2534 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | |
2535 | ||
2536 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | |
2537 | " delayed CMD list, se_ordered_id: %u\n", | |
2538 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | |
2539 | cmd->se_ordered_id); | |
2540 | /* | |
2541 | * Return zero to let transport_execute_tasks() know | |
2542 | * not to add the delayed tasks to the execution list. | |
2543 | */ | |
2544 | return 0; | |
2545 | } | |
2546 | /* | |
2547 | * Otherwise, no ORDERED task attributes exist. | |
2548 | */ | |
2549 | return 1; | |
2550 | } | |
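The branches above reduce to a small decision, modeled here as a hedged userspace sketch (may_execute_now and the *_pending flags are illustrative stand-ins for dev_ordered_sync and simple_cmds):

/* Returns 1 when the command's tasks may enter the execution queue now,
 * 0 when the command must wait on the delayed list (sketch only). */
static int may_execute_now(int attr, int ordered_pending, int simple_pending)
{
	if (attr == TASK_ATTR_HOQ)
		return 1;			/* goes to the front of the queue */
	if (attr == TASK_ATTR_ORDERED)
		return simple_pending ? 0 : 1;	/* waits for older commands */
	/* SIMPLE/UNTAGGED: held back only behind outstanding ORDERED */
	return ordered_pending ? 0 : 1;
}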
2551 | ||
2552 | /* | |
2553 | * Called from fabric module context in transport_generic_new_cmd() and | |
2554 | * transport_generic_process_write() | |
2555 | */ | |
2556 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2557 | { | |
2558 | int add_tasks; | |
2559 | ||
2560 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | |
2561 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | |
2562 | cmd->transport_error_status = | |
2563 | PYX_TRANSPORT_LU_COMM_FAILURE; | |
2564 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2565 | return 0; | |
2566 | } | |
2567 | } | |
2568 | /* | |
2569 | * Call transport_cmd_check_stop() to see if a fabric exception | |
2570 | * has occurred that prevents execution. | |
2571 | */ | |
2572 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | |
2573 | /* | |
2574 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2575 | * attribute for the tasks of the received struct se_cmd CDB | |
2576 | */ | |
2577 | add_tasks = transport_execute_task_attr(cmd); | |
2578 | if (add_tasks == 0) | |
2579 | goto execute_tasks; | |
2580 | /* | |
2581 | * This calls transport_add_tasks_from_cmd() to handle | |
2582 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2583 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2584 | * transport_add_task_check_sam_attr(). | |
2585 | */ | |
2586 | transport_add_tasks_from_cmd(cmd); | |
2587 | } | |
2588 | /* | |
2589 | * Kick the execution queue for the cmd associated struct se_device | |
2590 | * storage object. | |
2591 | */ | |
2592 | execute_tasks: | |
2593 | __transport_execute_tasks(SE_DEV(cmd)); | |
2594 | return 0; | |
2595 | } | |
2596 | ||
2597 | /* | |
2598 | * Called to check the struct se_device tcq depth window, and once open, pull struct se_task | |
2599 | * from struct se_device->execute_task_list and send it to the underlying subsystem plugin. | |
2600 | * | |
2601 | * Called from transport_processing_thread() | |
2602 | */ | |
2603 | static int __transport_execute_tasks(struct se_device *dev) | |
2604 | { | |
2605 | int error; | |
2606 | struct se_cmd *cmd = NULL; | |
2607 | struct se_task *task; | |
2608 | unsigned long flags; | |
2609 | ||
2610 | /* | |
2611 | * Check if there is enough room in the device and HBA queue to send | |
2612 | * struct se_transport_task's to the selected transport. | |
2613 | */ | |
2614 | check_depth: | |
2615 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | |
2616 | if (!(atomic_read(&dev->depth_left)) || | |
2617 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | |
2618 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2619 | return transport_tcq_window_closed(dev); | |
2620 | } | |
2621 | dev->dev_tcq_window_closed = 0; | |
2622 | ||
2623 | spin_lock(&dev->execute_task_lock); | |
2624 | task = transport_get_task_from_execute_queue(dev); | |
2625 | spin_unlock(&dev->execute_task_lock); | |
2626 | ||
2627 | if (!task) { | |
2628 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2629 | return 0; | |
2630 | } | |
2631 | ||
2632 | atomic_dec(&dev->depth_left); | |
2633 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | |
2634 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | |
2635 | ||
2636 | cmd = TASK_CMD(task); | |
2637 | ||
2638 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2639 | atomic_set(&task->task_active, 1); | |
2640 | atomic_set(&task->task_sent, 1); | |
2641 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | |
2642 | ||
2643 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | |
2644 | T_TASK(cmd)->t_task_cdbs) | |
2645 | atomic_set(&cmd->transport_sent, 1); | |
2646 | ||
2647 | transport_start_task_timer(task); | |
2648 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2649 | /* | |
2650 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
2651 | * to grab REPORT_LUNS CDBs before they hit the | |
2652 | * struct se_subsystem_api->do_task() caller below. | |
2653 | */ | |
2654 | if (cmd->transport_emulate_cdb) { | |
2655 | error = cmd->transport_emulate_cdb(cmd); | |
2656 | if (error != 0) { | |
2657 | cmd->transport_error_status = error; | |
2658 | atomic_set(&task->task_active, 0); | |
2659 | atomic_set(&cmd->transport_sent, 0); | |
2660 | transport_stop_tasks_for_cmd(cmd); | |
2661 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2662 | goto check_depth; | |
2663 | } | |
2664 | /* | |
2665 | * Handle the successful completion for transport_emulate_cdb() | |
2666 | * here for synchronous operation (SCF_EMULATE_CDB_ASYNC not set). | |
2667 | * Otherwise the caller is expected to complete the task with | |
2668 | * proper status. | |
2669 | */ | |
2670 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2671 | cmd->scsi_status = SAM_STAT_GOOD; | |
2672 | task->task_scsi_status = GOOD; | |
2673 | transport_complete_task(task, 1); | |
2674 | } | |
2675 | } else { | |
2676 | /* | |
2677 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2678 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2679 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2680 | * LUN emulation code. | |
2681 | * | |
2682 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2683 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2684 | * code handle the CDB emulation. | |
2685 | */ | |
2686 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | |
2687 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
2688 | error = transport_emulate_control_cdb(task); | |
2689 | else | |
2690 | error = TRANSPORT(dev)->do_task(task); | |
2691 | ||
2692 | if (error != 0) { | |
2693 | cmd->transport_error_status = error; | |
2694 | atomic_set(&task->task_active, 0); | |
2695 | atomic_set(&cmd->transport_sent, 0); | |
2696 | transport_stop_tasks_for_cmd(cmd); | |
2697 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2698 | } | |
2699 | } | |
2700 | ||
2701 | goto check_depth; | |
2704 | } | |
2705 | ||
2706 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2707 | { | |
2708 | unsigned long flags; | |
2709 | /* | |
2710 | * Any unsolicited data will get dumped for failed command inside of | |
2711 | * the fabric plugin | |
2712 | */ | |
2713 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | |
2714 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | |
2715 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2716 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | |
2717 | ||
2718 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | |
2719 | } | |
2720 | ||
2721 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | |
2722 | ||
2723 | static inline u32 transport_get_sectors_6( | |
2724 | unsigned char *cdb, | |
2725 | struct se_cmd *cmd, | |
2726 | int *ret) | |
2727 | { | |
2728 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2729 | ||
2730 | /* | |
2731 | * Assume TYPE_DISK for non struct se_device objects. | |
2732 | * Use 8-bit sector value. | |
2733 | */ | |
2734 | if (!dev) | |
2735 | goto type_disk; | |
2736 | ||
2737 | /* | |
2738 | * Use 24-bit allocation length for TYPE_TAPE. | |
2739 | */ | |
2740 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | |
2741 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | |
2742 | ||
2743 | /* | |
2744 | * Everything else assume TYPE_DISK Sector CDB location. | |
2745 | * Use 8-bit sector value. | |
2746 | */ | |
2747 | type_disk: | |
2748 | return (u32)cdb[4]; | |
2749 | } | |
2750 | ||
2751 | static inline u32 transport_get_sectors_10( | |
2752 | unsigned char *cdb, | |
2753 | struct se_cmd *cmd, | |
2754 | int *ret) | |
2755 | { | |
2756 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2757 | ||
2758 | /* | |
2759 | * Assume TYPE_DISK for non struct se_device objects. | |
2760 | * Use 16-bit sector value. | |
2761 | */ | |
2762 | if (!dev) | |
2763 | goto type_disk; | |
2764 | ||
2765 | /* | |
2766 | * XXX_10 is not defined in SSC, throw an exception | |
2767 | */ | |
2768 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2769 | *ret = -1; | |
2770 | return 0; | |
2771 | } | |
2772 | ||
2773 | /* | |
2774 | * Everything else assume TYPE_DISK Sector CDB location. | |
2775 | * Use 16-bit sector value. | |
2776 | */ | |
2777 | type_disk: | |
2778 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2779 | } | |
2780 | ||
2781 | static inline u32 transport_get_sectors_12( | |
2782 | unsigned char *cdb, | |
2783 | struct se_cmd *cmd, | |
2784 | int *ret) | |
2785 | { | |
2786 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2787 | ||
2788 | /* | |
2789 | * Assume TYPE_DISK for non struct se_device objects. | |
2790 | * Use 32-bit sector value. | |
2791 | */ | |
2792 | if (!dev) | |
2793 | goto type_disk; | |
2794 | ||
2795 | /* | |
2796 | * XXX_12 is not defined in SSC, throw an exception | |
2797 | */ | |
2798 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2799 | *ret = -1; | |
2800 | return 0; | |
2801 | } | |
2802 | ||
2803 | /* | |
2804 | * Everything else assume TYPE_DISK Sector CDB location. | |
2805 | * Use 32-bit sector value. | |
2806 | */ | |
2807 | type_disk: | |
2808 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2809 | } | |
2810 | ||
2811 | static inline u32 transport_get_sectors_16( | |
2812 | unsigned char *cdb, | |
2813 | struct se_cmd *cmd, | |
2814 | int *ret) | |
2815 | { | |
2816 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | |
2817 | ||
2818 | /* | |
2819 | * Assume TYPE_DISK for non struct se_device objects. | |
2820 | * Use 32-bit sector value. | |
2821 | */ | |
2822 | if (!dev) | |
2823 | goto type_disk; | |
2824 | ||
2825 | /* | |
2826 | * Use 24-bit allocation length for TYPE_TAPE. | |
2827 | */ | |
2828 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | |
2829 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | |
2830 | ||
2831 | type_disk: | |
2832 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2833 | (cdb[12] << 8) + cdb[13]; | |
2834 | } | |
2835 | ||
2836 | /* | |
2837 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2838 | */ | |
2839 | static inline u32 transport_get_sectors_32( | |
2840 | unsigned char *cdb, | |
2841 | struct se_cmd *cmd, | |
2842 | int *ret) | |
2843 | { | |
2844 | /* | |
2845 | * Assume TYPE_DISK for non struct se_device objects. | |
2846 | * Use 32-bit sector value. | |
2847 | */ | |
2848 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2849 | (cdb[30] << 8) + cdb[31]; | |
2850 | ||
2851 | } | |
2852 | ||
2853 | static inline u32 transport_get_size( | |
2854 | u32 sectors, | |
2855 | unsigned char *cdb, | |
2856 | struct se_cmd *cmd) | |
2857 | { | |
2858 | struct se_device *dev = SE_DEV(cmd); | |
2859 | ||
2860 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | |
2861 | if (cdb[1] & 1) { /* sectors */ | |
2862 | return DEV_ATTRIB(dev)->block_size * sectors; | |
2863 | } else /* bytes */ | |
2864 | return sectors; | |
2865 | } | |
2866 | #if 0 | |
2867 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | |
2868 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | |
2869 | DEV_ATTRIB(dev)->block_size * sectors, | |
2870 | TRANSPORT(dev)->name); | |
2871 | #endif | |
2872 | return DEV_ATTRIB(dev)->block_size * sectors; | |
2873 | } | |
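As a worked example of the calculation above: with a 512-byte block_size, sectors == 8 yields a 4096-byte transfer for TYPE_DISK, while a TYPE_TAPE CDB with the FIXED bit (cdb[1] & 1) clear transfers the raw byte count. A hedged userspace model (names illustrative):

/* Userspace model of the size calculation in transport_get_size() */
static unsigned int xfer_size(int is_tape, int fixed_bit,
			      unsigned int block_size, unsigned int sectors)
{
	if (is_tape && !fixed_bit)
		return sectors;			/* SSC: raw byte count */
	return block_size * sectors;		/* fixed-block transfer */
}
/* xfer_size(0, 0, 512, 8) == 4096, xfer_size(1, 0, 512, 8) == 8 */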
2874 | ||
2875 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | |
2876 | { | |
2877 | unsigned char result = 0; | |
2878 | /* | |
2879 | * MSB | |
2880 | */ | |
2881 | if ((val[0] >= 'a') && (val[0] <= 'f')) | |
2882 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | |
2883 | else | |
2884 | if ((val[0] >= 'A') && (val[0] <= 'F')) | |
2885 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | |
2886 | else /* digit */ | |
2887 | result = ((val[0] - '0') & 0xf) << 4; | |
2888 | /* | |
2889 | * LSB | |
2890 | */ | |
2891 | if ((val[1] >= 'a') && (val[1] <= 'f')) | |
2892 | result |= ((val[1] - 'a' + 10) & 0xf); | |
2893 | else | |
2894 | if ((val[1] >= 'A') && (val[1] <= 'F')) | |
2895 | result |= ((val[1] - 'A' + 10) & 0xf); | |
2896 | else /* digit */ | |
2897 | result |= ((val[1] - '0') & 0xf); | |
2898 | ||
2899 | return result; | |
2900 | } | |
2901 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | |
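Usage is straightforward; for example, converting the ASCII pair "3f" (either case works) back into its byte value:

unsigned char val[2] = { '3', 'f' };
unsigned char bin = transport_asciihex_to_binaryhex(val);	/* bin == 0x3f */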
2902 | ||
2903 | static void transport_xor_callback(struct se_cmd *cmd) | |
2904 | { | |
2905 | unsigned char *buf, *addr; | |
2906 | struct se_mem *se_mem; | |
2907 | unsigned int offset; | |
2908 | int i; | |
2909 | /* | |
2910 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2911 | * | |
2912 | * 1) read the specified logical block(s); | |
2913 | * 2) transfer logical blocks from the data-out buffer; | |
2914 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2915 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2916 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2917 | * blocks transferred from the data-out buffer; and | |
2918 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2919 | */ | |
2920 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
2921 | if (!(buf)) { | |
2922 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | |
2923 | return; | |
2924 | } | |
2925 | /* | |
2926 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | |
2927 | * into the locally allocated *buf | |
2928 | */ | |
2929 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | |
2930 | /* | |
2931 | * Now perform the XOR against the BIDI read memory located at | |
2932 | * T_TASK(cmd)->t_mem_bidi_list | |
2933 | */ | |
2934 | ||
2935 | offset = 0; | |
2936 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | |
2937 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | |
2938 | if (!(addr)) | |
2939 | goto out; | |
2940 | ||
2941 | for (i = 0; i < se_mem->se_len; i++) | |
2942 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | |
2943 | ||
2944 | offset += se_mem->se_len; | |
2945 | kunmap_atomic(addr, KM_USER0); | |
2946 | } | |
2947 | out: | |
2948 | kfree(buf); | |
2949 | } | |
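Steps 3 and 5 of the XDWRITEREAD list above reduce to a byte-wise XOR; a minimal userspace sketch using flat buffers in place of the struct se_mem scatterlists (all names and sample data are illustrative):

#include <assert.h>
#include <string.h>

/* XOR the data-out payload into the blocks previously read (step 3),
 * leaving the result where it will be returned to the data-in buffer. */
static void xdwriteread_xor(unsigned char *read_buf,
			    const unsigned char *write_buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		read_buf[i] ^= write_buf[i];
}

int main(void)
{
	unsigned char disk[4] = { 0xff, 0x00, 0xaa, 0x55 };
	unsigned char host[4] = { 0x0f, 0x0f, 0xaa, 0x00 };
	unsigned char want[4] = { 0xf0, 0x0f, 0x00, 0x55 };

	xdwriteread_xor(disk, host, 4);
	assert(memcmp(disk, want, 4) == 0);
	return 0;
}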
2950 | ||
2951 | /* | |
2952 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2953 | */ | |
2954 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2955 | { | |
2956 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2957 | struct se_device *dev; | |
2958 | struct se_task *task = NULL, *task_tmp; | |
2959 | unsigned long flags; | |
2960 | u32 offset = 0; | |
2961 | ||
2962 | if (!SE_LUN(cmd)) { | |
2963 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | |
2964 | return -1; | |
2965 | } | |
2966 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
2967 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
2968 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2969 | return 0; | |
2970 | } | |
2971 | ||
2972 | list_for_each_entry_safe(task, task_tmp, | |
2973 | &T_TASK(cmd)->t_task_list, t_list) { | |
2974 | ||
2975 | if (!task->task_sense) | |
2976 | continue; | |
2977 | ||
2978 | dev = task->se_dev; | |
2979 | if (!(dev)) | |
2980 | continue; | |
2981 | ||
2982 | if (!TRANSPORT(dev)->get_sense_buffer) { | |
2983 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | |
2984 | " is NULL\n"); | |
2985 | continue; | |
2986 | } | |
2987 | ||
2988 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | |
2989 | if (!(sense_buffer)) { | |
2990 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | |
2991 | " sense buffer for task with sense\n", | |
2992 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | |
2993 | continue; | |
2994 | } | |
2995 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
2996 | ||
2997 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | |
2998 | TRANSPORT_SENSE_BUFFER); | |
2999 | ||
3000 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | |
3001 | TRANSPORT_SENSE_BUFFER); | |
3002 | cmd->scsi_status = task->task_scsi_status; | |
3003 | /* Automatically padded */ | |
3004 | cmd->scsi_sense_length = | |
3005 | (TRANSPORT_SENSE_BUFFER + offset); | |
3006 | ||
3007 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | |
3008 | " and sense\n", | |
3009 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | |
3010 | cmd->scsi_status); | |
3011 | return 0; | |
3012 | } | |
3013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3014 | ||
3015 | return -1; | |
3016 | } | |
3017 | ||
3018 | static int transport_allocate_resources(struct se_cmd *cmd) | |
3019 | { | |
3020 | u32 length = cmd->data_length; | |
3021 | ||
3022 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
3023 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | |
3024 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); | |
3025 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) | |
3026 | return transport_generic_allocate_buf(cmd, length); | |
3027 | else | |
3028 | return 0; | |
3029 | } | |
3030 | ||
3031 | static int | |
3032 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
3033 | { | |
3034 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3035 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3036 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
3037 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
3038 | /* | |
3039 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
3040 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
3041 | * CONFLICT STATUS. | |
3042 | * | |
3043 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
3044 | */ | |
3045 | if (SE_SESS(cmd) && | |
3046 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | |
3047 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | |
3048 | cmd->orig_fe_lun, 0x2C, | |
3049 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
3050 | return -2; | |
3051 | } | |
3052 | ||
3053 | /* transport_generic_cmd_sequencer(): | |
3054 | * | |
3055 | * Generic Command Sequencer that should work for most DAS transport | |
3056 | * drivers. | |
3057 | * | |
3058 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
3059 | * RX Thread. | |
3060 | * | |
3061 | * FIXME: Need to support other SCSI OPCODES here as well. | |
3062 | */ | |
3063 | static int transport_generic_cmd_sequencer( | |
3064 | struct se_cmd *cmd, | |
3065 | unsigned char *cdb) | |
3066 | { | |
3067 | struct se_device *dev = SE_DEV(cmd); | |
3068 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | |
3069 | int ret = 0, sector_ret = 0, passthrough; | |
3070 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
3071 | u16 service_action; | |
3072 | u8 alua_ascq = 0; | |
3073 | /* | |
3074 | * Check for an existing UNIT ATTENTION condition | |
3075 | */ | |
3076 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
3077 | cmd->transport_wait_for_tasks = | |
3078 | &transport_nop_wait_for_tasks; | |
3079 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3080 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
3081 | return -2; | |
3082 | } | |
3083 | /* | |
3084 | * Check status of Asymmetric Logical Unit Assignment port | |
3085 | */ | |
3086 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | |
3087 | if (ret != 0) { | |
3088 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3089 | /* | |
3090 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | |
3091 | * The ALUA additional sense code qualifier (ASCQ) is determined | |
3092 | * by the ALUA primary or secondary access state. | |
3093 | */ | |
3094 | if (ret > 0) { | |
3095 | #if 0 | |
3096 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | |
3097 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | |
3098 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | |
3099 | #endif | |
3100 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
3101 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3102 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
3103 | return -2; | |
3104 | } | |
3105 | goto out_invalid_cdb_field; | |
3106 | } | |
3107 | /* | |
3108 | * Check status for SPC-3 Persistent Reservations | |
3109 | */ | |
3110 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | |
3111 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | |
3112 | cmd, cdb, pr_reg_type) != 0) | |
3113 | return transport_handle_reservation_conflict(cmd); | |
3114 | /* | |
3115 | * This means the CDB is allowed for the SCSI Initiator port | |
3116 | * when said port is *NOT* holding the legacy SPC-2 or | |
3117 | * SPC-3 Persistent Reservation. | |
3118 | */ | |
3119 | } | |
3120 | ||
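| /* | |
| * Decode the CDB: each opcode case below extracts the expected | |
| * transfer length into 'size' (and, for LBA-based opcodes, the | |
| * starting LBA and sector count), then classifies the command via | |
| * the SCF_SCSI_*_CDB flags that select the data buffer strategy. | |
| */ | |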
3121 | switch (cdb[0]) { | |
3122 | case READ_6: | |
3123 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | |
3124 | if (sector_ret) | |
3125 | goto out_unsupported_cdb; | |
3126 | size = transport_get_size(sectors, cdb, cmd); | |
3127 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
3128 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | |
3129 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3130 | break; | |
3131 | case READ_10: | |
3132 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3133 | if (sector_ret) | |
3134 | goto out_unsupported_cdb; | |
3135 | size = transport_get_size(sectors, cdb, cmd); | |
3136 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3137 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3138 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3139 | break; | |
3140 | case READ_12: | |
3141 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | |
3142 | if (sector_ret) | |
3143 | goto out_unsupported_cdb; | |
3144 | size = transport_get_size(sectors, cdb, cmd); | |
3145 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
3146 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3147 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3148 | break; | |
3149 | case READ_16: | |
3150 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3151 | if (sector_ret) | |
3152 | goto out_unsupported_cdb; | |
3153 | size = transport_get_size(sectors, cdb, cmd); | |
3154 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
3155 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3156 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3157 | break; | |
3158 | case WRITE_6: | |
3159 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | |
3160 | if (sector_ret) | |
3161 | goto out_unsupported_cdb; | |
3162 | size = transport_get_size(sectors, cdb, cmd); | |
3163 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
3164 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | |
3165 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3166 | break; | |
3167 | case WRITE_10: | |
3168 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3169 | if (sector_ret) | |
3170 | goto out_unsupported_cdb; | |
3171 | size = transport_get_size(sectors, cdb, cmd); | |
3172 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3173 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3174 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3175 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3176 | break; | |
3177 | case WRITE_12: | |
3178 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | |
3179 | if (sector_ret) | |
3180 | goto out_unsupported_cdb; | |
3181 | size = transport_get_size(sectors, cdb, cmd); | |
3182 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
3183 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3184 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3185 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3186 | break; | |
3187 | case WRITE_16: | |
3188 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3189 | if (sector_ret) | |
3190 | goto out_unsupported_cdb; | |
3191 | size = transport_get_size(sectors, cdb, cmd); | |
3192 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
3193 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3194 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3195 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3196 | break; | |
3197 | case XDWRITEREAD_10: | |
3198 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
3199 | !(T_TASK(cmd)->t_tasks_bidi)) | |
3200 | goto out_invalid_cdb_field; | |
3201 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3202 | if (sector_ret) | |
3203 | goto out_unsupported_cdb; | |
3204 | size = transport_get_size(sectors, cdb, cmd); | |
3205 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
3206 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3207 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3208 | passthrough = (TRANSPORT(dev)->transport_type == | |
3209 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3210 | /* | |
3211 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3212 | */ | |
3213 | if (passthrough) | |
3214 | break; | |
3215 | /* | |
3216 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
3217 | */ | |
3218 | cmd->transport_complete_callback = &transport_xor_callback; | |
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | |
3220 | break; | |
3221 | case VARIABLE_LENGTH_CMD: | |
3222 | service_action = get_unaligned_be16(&cdb[8]); | |
3223 | /* | |
3224 | * Determine if this is a TCM/pSCSI device and we should disable | |
3225 | * internal emulation for this CDB. | |
3226 | */ | |
3227 | passthrough = (TRANSPORT(dev)->transport_type == | |
3228 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3229 | ||
3230 | switch (service_action) { | |
3231 | case XDWRITEREAD_32: | |
3232 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | |
3233 | if (sector_ret) | |
3234 | goto out_unsupported_cdb; | |
3235 | size = transport_get_size(sectors, cdb, cmd); | |
3236 | /* | |
3237 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3238 | * XDWRITE_READ_32 logic. | |
3239 | */ | |
3240 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
3241 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | |
3242 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | |
3243 | ||
3244 | /* | |
3245 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3246 | */ | |
3247 | if (passthrough) | |
3248 | break; | |
3249 | ||
3250 | /* | |
3251 | * Setup BIDI XOR callback to be run during | |
3252 | * transport_generic_complete_ok() | |
3253 | */ | |
3254 | cmd->transport_complete_callback = &transport_xor_callback; | |
3255 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | |
3256 | break; | |
3257 | case WRITE_SAME_32: | |
3258 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | |
3259 | if (sector_ret) | |
3260 | goto out_unsupported_cdb; | |
3261 | size = transport_get_size(sectors, cdb, cmd); | |
3262 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | |
3263 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3264 | ||
3265 | /* | |
3266 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3267 | */ | |
3268 | if (passthrough) | |
3269 | break; | |
3270 | ||
3271 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | |
3272 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3273 | " bits not supported for Block Discard" | |
3274 | " Emulation\n"); | |
3275 | goto out_invalid_cdb_field; | |
3276 | } | |
3277 | /* | |
3278 | * Currently for the emulated case we only accept | |
3279 | * tpws with the UNMAP=1 bit set. | |
3280 | */ | |
3281 | if (!(cdb[10] & 0x08)) { | |
3282 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | |
3283 | " supported for Block Discard Emulation\n"); | |
3284 | goto out_invalid_cdb_field; | |
3285 | } | |
3286 | break; | |
3287 | default: | |
3288 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | |
3289 | " 0x%04x not supported\n", service_action); | |
3290 | goto out_unsupported_cdb; | |
3291 | } | |
3292 | break; | |
3293 | case 0xa3: | |
3294 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | |
3295 | /* MAINTENANCE_IN from SCC-2 */ | |
3296 | /* | |
3297 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3298 | */ | |
3299 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3300 | cmd->transport_emulate_cdb = | |
3301 | (T10_ALUA(su_dev)->alua_type == | |
3302 | SPC3_ALUA_EMULATED) ? | |
3303 | &core_emulate_report_target_port_groups : | |
3304 | NULL; | |
3305 | } | |
3306 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3307 | (cdb[8] << 8) | cdb[9]; | |
3308 | } else { | |
3309 | /* GPCMD_SEND_KEY from multi media commands */ | |
3310 | size = (cdb[8] << 8) + cdb[9]; | |
3311 | } | |
3312 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3313 | break; | |
3314 | case MODE_SELECT: | |
3315 | size = cdb[4]; | |
3316 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3317 | break; | |
3318 | case MODE_SELECT_10: | |
3319 | size = (cdb[7] << 8) + cdb[8]; | |
3320 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3321 | break; | |
3322 | case MODE_SENSE: | |
3323 | size = cdb[4]; | |
3324 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3325 | break; | |
3326 | case MODE_SENSE_10: | |
3327 | case GPCMD_READ_BUFFER_CAPACITY: | |
3328 | case GPCMD_SEND_OPC: | |
3329 | case LOG_SELECT: | |
3330 | case LOG_SENSE: | |
3331 | size = (cdb[7] << 8) + cdb[8]; | |
3332 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3333 | break; | |
3334 | case READ_BLOCK_LIMITS: | |
3335 | size = READ_BLOCK_LEN; | |
3336 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3337 | break; | |
3338 | case GPCMD_GET_CONFIGURATION: | |
3339 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3340 | case GPCMD_READ_DISC_INFO: | |
3341 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3342 | size = (cdb[7] << 8) + cdb[8]; | |
3343 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3344 | break; | |
3345 | case PERSISTENT_RESERVE_IN: | |
3346 | case PERSISTENT_RESERVE_OUT: | |
3347 | cmd->transport_emulate_cdb = | |
3348 | (T10_RES(su_dev)->res_type == | |
3349 | SPC3_PERSISTENT_RESERVATIONS) ? | |
3350 | &core_scsi3_emulate_pr : NULL; | |
3351 | size = (cdb[7] << 8) + cdb[8]; | |
3352 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3353 | break; | |
3354 | case GPCMD_MECHANISM_STATUS: | |
3355 | case GPCMD_READ_DVD_STRUCTURE: | |
3356 | size = (cdb[8] << 8) + cdb[9]; | |
3357 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3358 | break; | |
3359 | case READ_POSITION: | |
3360 | size = READ_POSITION_LEN; | |
3361 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3362 | break; | |
3363 | case 0xa4: | |
3364 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | |
3365 | /* MAINTENANCE_OUT from SCC-2 | |
3366 | * | |
3367 | * Check for emulated MO_SET_TARGET_PGS. | |
3368 | */ | |
3369 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3370 | cmd->transport_emulate_cdb = | |
3371 | (T10_ALUA(su_dev)->alua_type == | |
3372 | SPC3_ALUA_EMULATED) ? | |
3373 | &core_emulate_set_target_port_groups : | |
3374 | NULL; | |
3375 | } | |
3376 | ||
3377 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3378 | (cdb[8] << 8) | cdb[9]; | |
3379 | } else { | |
3380 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3381 | size = (cdb[8] << 8) + cdb[9]; | |
3382 | } | |
3383 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3384 | break; | |
3385 | case INQUIRY: | |
3386 | size = (cdb[3] << 8) + cdb[4]; | |
3387 | /* | |
3388 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | |
3389 | * See spc4r17 section 5.3 | |
3390 | */ | |
3391 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3392 | cmd->sam_task_attr = TASK_ATTR_HOQ; | |
3393 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3394 | break; | |
3395 | case READ_BUFFER: | |
3396 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3397 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3398 | break; | |
3399 | case READ_CAPACITY: | |
3400 | size = READ_CAP_LEN; | |
3401 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3402 | break; | |
3403 | case READ_MEDIA_SERIAL_NUMBER: | |
3404 | case SECURITY_PROTOCOL_IN: | |
3405 | case SECURITY_PROTOCOL_OUT: | |
3406 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
3407 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3408 | break; | |
3409 | case SERVICE_ACTION_IN: | |
3410 | case ACCESS_CONTROL_IN: | |
3411 | case ACCESS_CONTROL_OUT: | |
3412 | case EXTENDED_COPY: | |
3413 | case READ_ATTRIBUTE: | |
3414 | case RECEIVE_COPY_RESULTS: | |
3415 | case WRITE_ATTRIBUTE: | |
3416 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3417 | (cdb[12] << 8) | cdb[13]; | |
3418 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3419 | break; | |
3420 | case RECEIVE_DIAGNOSTIC: | |
3421 | case SEND_DIAGNOSTIC: | |
3422 | size = (cdb[3] << 8) | cdb[4]; | |
3423 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3424 | break; | |
3425 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3426 | #if 0 | |
3427 | case GPCMD_READ_CD: | |
3428 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3429 | size = (2336 * sectors); | |
3430 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3431 | break; | |
3432 | #endif | |
3433 | case READ_TOC: | |
3434 | size = cdb[8]; | |
3435 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3436 | break; | |
3437 | case REQUEST_SENSE: | |
3438 | size = cdb[4]; | |
3439 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3440 | break; | |
3441 | case READ_ELEMENT_STATUS: | |
3442 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
3443 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3444 | break; | |
3445 | case WRITE_BUFFER: | |
3446 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3447 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3448 | break; | |
3449 | case RESERVE: | |
3450 | case RESERVE_10: | |
3451 | /* | |
3452 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3453 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3454 | */ | |
3455 | if (cdb[0] == RESERVE_10) | |
3456 | size = (cdb[7] << 8) | cdb[8]; | |
3457 | else | |
3458 | size = cmd->data_length; | |
3459 | ||
3460 | /* | |
3461 | * Setup the legacy emulated handler for SPC-2 and | |
3462 | * >= SPC-3 compatible reservation handling (CRH=1). | |
3463 | * Otherwise, we assume the underlying SCSI logic is | |
3464 | * running in SPC_PASSTHROUGH, and wants reservations | |
3465 | * emulation disabled. | |
3466 | */ | |
3467 | cmd->transport_emulate_cdb = | |
3468 | (T10_RES(su_dev)->res_type != | |
3469 | SPC_PASSTHROUGH) ? | |
3470 | &core_scsi2_emulate_crh : NULL; | |
3471 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3472 | break; | |
3473 | case RELEASE: | |
3474 | case RELEASE_10: | |
3475 | /* | |
3476 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3477 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3478 | */ | |
3479 | if (cdb[0] == RELEASE_10) | |
3480 | size = (cdb[7] << 8) | cdb[8]; | |
3481 | else | |
3482 | size = cmd->data_length; | |
3483 | ||
3484 | cmd->transport_emulate_cdb = | |
3485 | (T10_RES(su_dev)->res_type != | |
3486 | SPC_PASSTHROUGH) ? | |
3487 | &core_scsi2_emulate_crh : NULL; | |
3488 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3489 | break; | |
3490 | case SYNCHRONIZE_CACHE: | |
3491 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3492 | /* | |
3493 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3494 | */ | |
3495 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3496 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | |
3497 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | |
3498 | } else { | |
3499 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3500 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | |
3501 | } | |
3502 | if (sector_ret) | |
3503 | goto out_unsupported_cdb; | |
3504 | ||
3505 | size = transport_get_size(sectors, cdb, cmd); | |
3506 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3507 | ||
3508 | /* | |
3509 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3510 | */ | |
3511 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | |
3512 | break; | |
3513 | /* | |
3514 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3515 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3516 | */ | |
3517 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3518 | /* | |
3519 | * Check to ensure that LBA + Range does not extend past the end | |
3520 | * of the device. | |
3521 | */ | |
3522 | if (transport_get_sectors(cmd) < 0) | |
3523 | goto out_invalid_cdb_field; | |
3524 | break; | |
3525 | case UNMAP: | |
3526 | size = get_unaligned_be16(&cdb[7]); | |
3527 | passthrough = (TRANSPORT(dev)->transport_type == | |
3528 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3529 | /* | |
3530 | * Determine if the received UNMAP is used for direct passthrough | |
3531 | * into Linux/SCSI with struct request via TCM/pSCSI, or if we are | |
3532 | * signaling the use of internal transport_generic_unmap() emulation | |
3533 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | |
3534 | * subsystem plugin backstores. | |
3535 | */ | |
3536 | if (!(passthrough)) | |
3537 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | |
3538 | ||
3539 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3540 | break; | |
3541 | case WRITE_SAME_16: | |
3542 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | |
3543 | if (sector_ret) | |
3544 | goto out_unsupported_cdb; | |
3545 | size = transport_get_size(sectors, cdb, cmd); | |
3546 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]); /* WRITE_SAME_16 carries a 64-bit LBA in CDB bytes 2-9 */ | |
3547 | passthrough = (TRANSPORT(dev)->transport_type == | |
3548 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
3549 | /* | |
3550 | * Determine if the received WRITE_SAME_16 is used for direct | |
3551 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI, | |
3552 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
3553 | * emulation for Linux/BLOCK discard with TCM/IBLOCK and | |
3554 | * TCM/FILEIO subsystem plugin backstores. | |
3555 | */ | |
3556 | if (!(passthrough)) { | |
3557 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | |
3558 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3559 | " bits not supported for Block Discard" | |
3560 | " Emulation\n"); | |
3561 | goto out_invalid_cdb_field; | |
3562 | } | |
3563 | /* | |
3564 | * Currently for the emulated case we only accept | |
3565 | * tpws with the UNMAP=1 bit set. | |
3566 | */ | |
3567 | if (!(cdb[1] & 0x08)) { | |
3568 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " | |
3569 | " supported for Block Discard Emulation\n"); | |
3570 | goto out_invalid_cdb_field; | |
3571 | } | |
3572 | } | |
3573 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3574 | break; | |
3575 | case ALLOW_MEDIUM_REMOVAL: | |
3576 | case GPCMD_CLOSE_TRACK: | |
3577 | case ERASE: | |
3578 | case INITIALIZE_ELEMENT_STATUS: | |
3579 | case GPCMD_LOAD_UNLOAD: | |
3580 | case REZERO_UNIT: | |
3581 | case SEEK_10: | |
3582 | case GPCMD_SET_SPEED: | |
3583 | case SPACE: | |
3584 | case START_STOP: | |
3585 | case TEST_UNIT_READY: | |
3586 | case VERIFY: | |
3587 | case WRITE_FILEMARKS: | |
3588 | case MOVE_MEDIUM: | |
3589 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3590 | break; | |
3591 | case REPORT_LUNS: | |
3592 | cmd->transport_emulate_cdb = | |
3593 | &transport_core_report_lun_response; | |
3594 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
3595 | /* | |
3596 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | |
3597 | * See spc4r17 section 5.3 | |
3598 | */ | |
3599 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3600 | cmd->sam_task_attr = TASK_ATTR_HOQ; | |
3601 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3602 | break; | |
3603 | default: | |
3604 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | |
3605 | " 0x%02x, sending CHECK_CONDITION.\n", | |
3606 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | |
3607 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
3608 | goto out_unsupported_cdb; | |
3609 | } | |
3610 | ||
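| /* | |
| * Compare the transfer length decoded from the CDB against the | |
| * expected data transfer length reported by the fabric, and set | |
| * up SAM residual (overflow/underflow) handling on a mismatch. | |
| */ | |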
3611 | if (size != cmd->data_length) { | |
3612 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | |
3613 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | |
3614 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | |
3615 | cmd->data_length, size, cdb[0]); | |
3616 | ||
3617 | cmd->cmd_spdtl = size; | |
3618 | ||
3619 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
3620 | printk(KERN_ERR "Rejecting underflow/overflow" | |
3621 | " WRITE data\n"); | |
3622 | goto out_invalid_cdb_field; | |
3623 | } | |
3624 | /* | |
3625 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3626 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3627 | */ | |
3628 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | |
3629 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | |
3630 | " CDB on non 512-byte sector setup subsystem" | |
3631 | " plugin: %s\n", TRANSPORT(dev)->name); | |
3632 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | |
3633 | goto out_invalid_cdb_field; | |
3634 | } | |
3635 | ||
3636 | if (size > cmd->data_length) { | |
3637 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3638 | cmd->residual_count = (size - cmd->data_length); | |
3639 | } else { | |
3640 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3641 | cmd->residual_count = (cmd->data_length - size); | |
3642 | } | |
3643 | cmd->data_length = size; | |
3644 | } | |
3645 | ||
3646 | transport_set_supported_SAM_opcode(cmd); | |
3647 | return ret; | |
3648 | ||
3649 | out_unsupported_cdb: | |
3650 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3651 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
3652 | return -2; | |
3653 | out_invalid_cdb_field: | |
3654 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3655 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
3656 | return -2; | |
3657 | } | |
3658 | ||
3659 | static inline void transport_release_tasks(struct se_cmd *); | |
3660 | ||
3661 | /* | |
3662 | * This function will copy a contiguous *src buffer into a destination | |
3663 | * struct scatterlist array. | |
3664 | */ | |
3665 | static void transport_memcpy_write_contig( | |
3666 | struct se_cmd *cmd, | |
3667 | struct scatterlist *sg_d, | |
3668 | unsigned char *src) | |
3669 | { | |
3670 | u32 i = 0, length = 0, total_length = cmd->data_length; | |
3671 | void *dst; | |
3672 | ||
3673 | while (total_length) { | |
3674 | length = sg_d[i].length; | |
3675 | ||
3676 | if (length > total_length) | |
3677 | length = total_length; | |
3678 | ||
3679 | dst = sg_virt(&sg_d[i]); | |
3680 | ||
3681 | memcpy(dst, src, length); | |
3682 | ||
3683 | if (!(total_length -= length)) | |
3684 | return; | |
3685 | ||
3686 | src += length; | |
3687 | i++; | |
3688 | } | |
3689 | } | |
3690 | ||
3691 | /* | |
3692 | * This function will copy a struct scatterlist array *sg_s into a destination | |
3693 | * contiguous *dst buffer. | |
3694 | */ | |
3695 | static void transport_memcpy_read_contig( | |
3696 | struct se_cmd *cmd, | |
3697 | unsigned char *dst, | |
3698 | struct scatterlist *sg_s) | |
3699 | { | |
3700 | u32 i = 0, length = 0, total_length = cmd->data_length; | |
3701 | void *src; | |
3702 | ||
3703 | while (total_length) { | |
3704 | length = sg_s[i].length; | |
3705 | ||
3706 | if (length > total_length) | |
3707 | length = total_length; | |
3708 | ||
3709 | src = sg_virt(&sg_s[i]); | |
3710 | ||
3711 | memcpy(dst, src, length); | |
3712 | ||
3713 | if (!(total_length -= length)) | |
3714 | return; | |
3715 | ||
3716 | dst += length; | |
3717 | i++; | |
3718 | } | |
3719 | } | |
3720 | ||
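| /* | |
| * This function will copy a list of struct se_mem elements into a | |
| * destination contiguous *dst buffer. | |
| */ | |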
3721 | static void transport_memcpy_se_mem_read_contig( | |
3722 | struct se_cmd *cmd, | |
3723 | unsigned char *dst, | |
3724 | struct list_head *se_mem_list) | |
3725 | { | |
3726 | struct se_mem *se_mem; | |
3727 | void *src; | |
3728 | u32 length = 0, total_length = cmd->data_length; | |
3729 | ||
3730 | list_for_each_entry(se_mem, se_mem_list, se_list) { | |
3731 | length = se_mem->se_len; | |
3732 | ||
3733 | if (length > total_length) | |
3734 | length = total_length; | |
3735 | ||
3736 | src = page_address(se_mem->se_page) + se_mem->se_off; | |
3737 | ||
3738 | memcpy(dst, src, length); | |
3739 | ||
3740 | if (!(total_length -= length)) | |
3741 | return; | |
3742 | ||
3743 | dst += length; | |
3744 | } | |
3745 | } | |
3746 | ||
3747 | /* | |
3748 | * Called from transport_generic_complete_ok() and | |
3749 | * transport_generic_request_failure() to determine which dormant/delayed | |
3750 | * and ordered cmds need to have their tasks added to the execution queue. | |
3751 | */ | |
3752 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3753 | { | |
3754 | struct se_device *dev = SE_DEV(cmd); | |
3755 | struct se_cmd *cmd_p, *cmd_tmp; | |
3756 | int new_active_tasks = 0; | |
3757 | ||
3758 | if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { | |
3759 | atomic_dec(&dev->simple_cmds); | |
3760 | smp_mb__after_atomic_dec(); | |
3761 | dev->dev_cur_ordered_id++; | |
3762 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | |
3763 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | |
3764 | cmd->se_ordered_id); | |
3765 | } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | |
3766 | atomic_dec(&dev->dev_hoq_count); | |
3767 | smp_mb__after_atomic_dec(); | |
3768 | dev->dev_cur_ordered_id++; | |
3769 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | |
3770 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | |
3771 | cmd->se_ordered_id); | |
3772 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | |
3773 | spin_lock(&dev->ordered_cmd_lock); | |
3774 | list_del(&cmd->se_ordered_list); | |
3775 | atomic_dec(&dev->dev_ordered_sync); | |
3776 | smp_mb__after_atomic_dec(); | |
3777 | spin_unlock(&dev->ordered_cmd_lock); | |
3778 | ||
3779 | dev->dev_cur_ordered_id++; | |
3780 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | |
3781 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | |
3782 | } | |
3783 | /* | |
3784 | * Process all commands up to the last received | |
3785 | * ORDERED task attribute which requires another blocking | |
3786 | * boundary | |
3787 | */ | |
3788 | spin_lock(&dev->delayed_cmd_lock); | |
3789 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
3790 | &dev->delayed_cmd_list, se_delayed_list) { | |
3791 | ||
3792 | list_del(&cmd_p->se_delayed_list); | |
3793 | spin_unlock(&dev->delayed_cmd_lock); | |
3794 | ||
3795 | DEBUG_STA("Calling add_tasks() for" | |
3796 | " cmd_p: 0x%02x Task Attr: 0x%02x" | |
3797 | " Dormant -> Active, se_ordered_id: %u\n", | |
3798 | T_TASK(cmd_p)->t_task_cdb[0], | |
3799 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | |
3800 | ||
3801 | transport_add_tasks_from_cmd(cmd_p); | |
3802 | new_active_tasks++; | |
3803 | ||
3804 | spin_lock(&dev->delayed_cmd_lock); | |
3805 | if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) | |
3806 | break; | |
3807 | } | |
3808 | spin_unlock(&dev->delayed_cmd_lock); | |
3809 | /* | |
3810 | * If new tasks have become active, wake up the transport thread | |
3811 | * to do the processing of the Active tasks. | |
3812 | */ | |
3813 | if (new_active_tasks != 0) | |
3814 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | |
3815 | } | |
3816 | ||
3817 | static void transport_generic_complete_ok(struct se_cmd *cmd) | |
3818 | { | |
3819 | int reason = 0; | |
3820 | /* | |
3821 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3822 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3823 | * Attribute. | |
3824 | */ | |
3825 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
3826 | transport_complete_task_attr(cmd); | |
3827 | /* | |
3828 | * Check if we need to retrieve a sense buffer from | |
3829 | * the struct se_cmd in question. | |
3830 | */ | |
3831 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3832 | if (transport_get_sense_data(cmd) < 0) | |
3833 | reason = TCM_NON_EXISTENT_LUN; | |
3834 | ||
3835 | /* | |
3836 | * Only set when a struct se_task->task_scsi_status returned | |
3837 | * a non-GOOD status. | |
3838 | */ | |
3839 | if (cmd->scsi_status) { | |
3840 | transport_send_check_condition_and_sense( | |
3841 | cmd, reason, 1); | |
3842 | transport_lun_remove_cmd(cmd); | |
3843 | transport_cmd_check_stop_to_fabric(cmd); | |
3844 | return; | |
3845 | } | |
3846 | } | |
3847 | /* | |
3848 | * Check for a callback, used by, amongst other things, | |
3849 | * XDWRITE_READ_10 emulation. | |
3850 | */ | |
3851 | if (cmd->transport_complete_callback) | |
3852 | cmd->transport_complete_callback(cmd); | |
3853 | ||
3854 | switch (cmd->data_direction) { | |
3855 | case DMA_FROM_DEVICE: | |
3856 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3857 | if (SE_LUN(cmd)->lun_sep) { | |
3858 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | |
3859 | cmd->data_length; | |
3860 | } | |
3861 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3862 | /* | |
3863 | * If enabled by TCM fabric module pre-registered SGL | |
3864 | * memory, perform the memcpy() from the TCM internal | |
3865 | * contiguous buffer back to the original SGL. | |
3866 | */ | |
3867 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | |
3868 | transport_memcpy_write_contig(cmd, | |
3869 | T_TASK(cmd)->t_task_pt_sgl, | |
3870 | T_TASK(cmd)->t_task_buf); | |
3871 | ||
3872 | CMD_TFO(cmd)->queue_data_in(cmd); | |
3873 | break; | |
3874 | case DMA_TO_DEVICE: | |
3875 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3876 | if (SE_LUN(cmd)->lun_sep) { | |
3877 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | |
3878 | cmd->data_length; | |
3879 | } | |
3880 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3881 | /* | |
3882 | * Check if we need to send READ payload for BIDI-COMMAND | |
3883 | */ | |
3884 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | |
3885 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
3886 | if (SE_LUN(cmd)->lun_sep) { | |
3887 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | |
3888 | cmd->data_length; | |
3889 | } | |
3890 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3891 | CMD_TFO(cmd)->queue_data_in(cmd); | |
3892 | break; | |
3893 | } | |
3894 | /* Fall through for DMA_TO_DEVICE */ | |
3895 | case DMA_NONE: | |
3896 | CMD_TFO(cmd)->queue_status(cmd); | |
3897 | break; | |
3898 | default: | |
3899 | break; | |
3900 | } | |
3901 | ||
3902 | transport_lun_remove_cmd(cmd); | |
3903 | transport_cmd_check_stop_to_fabric(cmd); | |
3904 | } | |
3905 | ||
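| /* | |
| * Walk T_TASK(cmd)->t_task_list and release each inactive struct | |
| * se_task, freeing its (BIDI) scatterlists here and the task itself | |
| * through the subsystem plugin's free_task() callback. | |
| */ | |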
3906 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3907 | { | |
3908 | struct se_task *task, *task_tmp; | |
3909 | unsigned long flags; | |
3910 | ||
3911 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
3912 | list_for_each_entry_safe(task, task_tmp, | |
3913 | &T_TASK(cmd)->t_task_list, t_list) { | |
3914 | if (atomic_read(&task->task_active)) | |
3915 | continue; | |
3916 | ||
3917 | kfree(task->task_sg_bidi); | |
3918 | kfree(task->task_sg); | |
3919 | ||
3920 | list_del(&task->t_list); | |
3921 | ||
3922 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3923 | if (task->se_dev) | |
3924 | TRANSPORT(task->se_dev)->free_task(task); | |
3925 | else | |
3926 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | |
3927 | task->task_no); | |
3928 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
3929 | } | |
3930 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
3931 | } | |
3932 | ||
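| /* | |
| * Release the data buffers backing a command. Pages are freed only | |
| * when TCM allocated them itself; pre-registered SGL memory | |
| * (SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) is left to the fabric caller. | |
| */ | |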
3933 | static inline void transport_free_pages(struct se_cmd *cmd) | |
3934 | { | |
3935 | struct se_mem *se_mem, *se_mem_tmp; | |
3936 | int free_page = 1; | |
3937 | ||
3938 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3939 | free_page = 0; | |
3940 | if (cmd->se_dev->transport->do_se_mem_map) | |
3941 | free_page = 0; | |
3942 | ||
3943 | if (T_TASK(cmd)->t_task_buf) { | |
3944 | kfree(T_TASK(cmd)->t_task_buf); | |
3945 | T_TASK(cmd)->t_task_buf = NULL; | |
3946 | return; | |
3947 | } | |
3948 | ||
3949 | /* | |
3950 | * Caller will handle releasing of struct se_mem. | |
3951 | */ | |
3952 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | |
3953 | return; | |
3954 | ||
3955 | if (!(T_TASK(cmd)->t_tasks_se_num)) | |
3956 | return; | |
3957 | ||
3958 | list_for_each_entry_safe(se_mem, se_mem_tmp, | |
3959 | T_TASK(cmd)->t_mem_list, se_list) { | |
3960 | /* | |
3961 | * We only call __free_page(struct se_mem->se_page) when | |
3962 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
3963 | */ | |
3964 | if (free_page) | |
3965 | __free_page(se_mem->se_page); | |
3966 | ||
3967 | list_del(&se_mem->se_list); | |
3968 | kmem_cache_free(se_mem_cache, se_mem); | |
3969 | } | |
3970 | ||
3971 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | |
3972 | list_for_each_entry_safe(se_mem, se_mem_tmp, | |
3973 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | |
3974 | /* | |
3975 | * We only call __free_page(struct se_mem->se_page) when | |
3976 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
3977 | */ | |
3978 | if (free_page) | |
3979 | __free_page(se_mem->se_page); | |
3980 | ||
3981 | list_del(&se_mem->se_list); | |
3982 | kmem_cache_free(se_mem_cache, se_mem); | |
3983 | } | |
3984 | } | |
3985 | ||
3986 | kfree(T_TASK(cmd)->t_mem_bidi_list); | |
3987 | T_TASK(cmd)->t_mem_bidi_list = NULL; | |
3988 | kfree(T_TASK(cmd)->t_mem_list); | |
3989 | T_TASK(cmd)->t_mem_list = NULL; | |
3990 | T_TASK(cmd)->t_tasks_se_num = 0; | |
3991 | } | |
3992 | ||
3993 | static inline void transport_release_tasks(struct se_cmd *cmd) | |
3994 | { | |
3995 | transport_free_dev_tasks(cmd); | |
3996 | } | |
3997 | ||
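| /* | |
| * Drop the frontend (t_fe_count) and core (t_se_count) command | |
| * references. Returns 1 while other references remain, and 0 once | |
| * the caller holds the final reference and may release resources. | |
| */ | |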
3998 | static inline int transport_dec_and_check(struct se_cmd *cmd) | |
3999 | { | |
4000 | unsigned long flags; | |
4001 | ||
4002 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4003 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | |
4004 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | |
4005 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4006 | flags); | |
4007 | return 1; | |
4008 | } | |
4009 | } | |
4010 | ||
4011 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | |
4012 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | |
4013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4014 | flags); | |
4015 | return 1; | |
4016 | } | |
4017 | } | |
4018 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4019 | ||
4020 | return 0; | |
4021 | } | |
4022 | ||
4023 | static void transport_release_fe_cmd(struct se_cmd *cmd) | |
4024 | { | |
4025 | unsigned long flags; | |
4026 | ||
4027 | if (transport_dec_and_check(cmd)) | |
4028 | return; | |
4029 | ||
4030 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4031 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
4032 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4033 | goto free_pages; | |
4034 | } | |
4035 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
4036 | transport_all_task_dev_remove_state(cmd); | |
4037 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4038 | ||
4039 | transport_release_tasks(cmd); | |
4040 | free_pages: | |
4041 | transport_free_pages(cmd); | |
4042 | transport_free_se_cmd(cmd); | |
4043 | CMD_TFO(cmd)->release_cmd_direct(cmd); | |
4044 | } | |
4045 | ||
4046 | static int transport_generic_remove( | |
4047 | struct se_cmd *cmd, | |
4048 | int release_to_pool, | |
4049 | int session_reinstatement) | |
4050 | { | |
4051 | unsigned long flags; | |
4052 | ||
4053 | if (!(T_TASK(cmd))) | |
4054 | goto release_cmd; | |
4055 | ||
4056 | if (transport_dec_and_check(cmd)) { | |
4057 | if (session_reinstatement) { | |
4058 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4059 | transport_all_task_dev_remove_state(cmd); | |
4060 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | |
4061 | flags); | |
4062 | } | |
4063 | return 1; | |
4064 | } | |
4065 | ||
4066 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | |
4067 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | |
4068 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4069 | goto free_pages; | |
4070 | } | |
4071 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | |
4072 | transport_all_task_dev_remove_state(cmd); | |
4073 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | |
4074 | ||
4075 | transport_release_tasks(cmd); | |
4076 | free_pages: | |
4077 | transport_free_pages(cmd); | |
4078 | ||
4079 | release_cmd: | |
4080 | if (release_to_pool) { | |
4081 | transport_release_cmd_to_pool(cmd); | |
4082 | } else { | |
4083 | transport_free_se_cmd(cmd); | |
4084 | CMD_TFO(cmd)->release_cmd_direct(cmd); | |
4085 | } | |
4086 | ||
4087 | return 0; | |
4088 | } | |
4089 | ||
4090 | /* | |
4091 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | |
4092 | * @cmd: Associated se_cmd descriptor | |
4093 | * @mem: SGL style memory for TCM WRITE / READ | |
4094 | * @sg_mem_num: Number of SGL elements | |
4095 | * @mem_bidi_in: SGL style memory for TCM BIDI READ | |
4096 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements | |
4097 | * | |
4098 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | |
4099 | * of parameters. | |
4100 | */ | |
4101 | int transport_generic_map_mem_to_cmd( | |
4102 | struct se_cmd *cmd, | |
4103 | struct scatterlist *mem, | |
4104 | u32 sg_mem_num, | |
4105 | struct scatterlist *mem_bidi_in, | |
4106 | u32 sg_mem_bidi_num) | |
4107 | { | |
4108 | u32 se_mem_cnt_out = 0; | |
4109 | int ret; | |
4110 | ||
4111 | if (!(mem) || !(sg_mem_num)) | |
4112 | return 0; | |
4113 | /* | |
4114 | * Passed *mem will contain a list_head containing preformatted | |
4115 | * struct se_mem elements... | |
4116 | */ | |
4117 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { | |
4118 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { | |
4119 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" | |
4120 | " with BIDI-COMMAND\n"); | |
4121 | return -ENOSYS; | |
4122 | } | |
4123 | ||
4124 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | |
4125 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | |
4126 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | |
4127 | return 0; | |
4128 | } | |
4129 | /* | |
4130 | * Otherwise, assume the caller is passing a struct scatterlist | |
4131 | * array from include/linux/scatterlist.h | |
4132 | */ | |
4133 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
4134 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
4135 | /* | |
4136 | * For CDBs using TCM struct se_mem linked-list scatterlist memory | |
4137 | * processed into a TCM struct se_subsystem_dev, we do the mapping | |
4138 | * from the passed physical memory to struct se_mem->se_page here. | |
4139 | */ | |
4140 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | |
4141 | if (!(T_TASK(cmd)->t_mem_list)) | |
4142 | return -ENOMEM; | |
4143 | ||
4144 | ret = transport_map_sg_to_mem(cmd, | |
4145 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | |
4146 | if (ret < 0) | |
4147 | return -ENOMEM; | |
4148 | ||
4149 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | |
4150 | /* | |
4151 | * Setup BIDI READ list of struct se_mem elements | |
4152 | */ | |
4153 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | |
4154 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | |
4155 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | |
4156 | kfree(T_TASK(cmd)->t_mem_list); | |
4157 | return -ENOMEM; | |
4158 | } | |
4159 | se_mem_cnt_out = 0; | |
4160 | ||
4161 | ret = transport_map_sg_to_mem(cmd, | |
4162 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | |
4163 | &se_mem_cnt_out); | |
4164 | if (ret < 0) { | |
4165 | kfree(T_TASK(cmd)->t_mem_list); | |
4166 | return -ENOMEM; | |
4167 | } | |
4168 | ||
4169 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | |
4170 | } | |
4171 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
4172 | ||
4173 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | |
4174 | if (mem_bidi_in || sg_mem_bidi_num) { | |
4175 | printk(KERN_ERR "BIDI-Commands not supported using " | |
4176 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | |
4177 | return -ENOSYS; | |
4178 | } | |
4179 | /* | |
4180 | * For incoming CDBs using a contiguous buffer internally with TCM, | |
4181 | * save the passed struct scatterlist memory. After TCM storage object | |
4182 | * processing has completed for this struct se_cmd, TCM core will call | |
4183 | * transport_memcpy_[write,read]_contig() as necessary from | |
4184 | * transport_generic_complete_ok() and transport_write_pending() in order | |
4185 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | |
4186 | * struct scatterlist format. | |
4187 | */ | |
4188 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | |
4189 | T_TASK(cmd)->t_task_pt_sgl = mem; | |
4190 | } | |
4191 | ||
4192 | return 0; | |
4193 | } | |
4194 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
4195 | ||
4196 | ||
4197 | static inline long long transport_dev_end_lba(struct se_device *dev) | |
4198 | { | |
4199 | return dev->transport->get_blocks(dev) + 1; | |
4200 | } | |
4201 | ||
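| /* | |
| * Convert cmd->data_length into T_TASK(cmd)->t_tasks_sectors and, | |
| * for TYPE_DISK devices, verify that LBA + sectors does not run | |
| * past the end of the device. | |
| */ | |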
4202 | static int transport_get_sectors(struct se_cmd *cmd) | |
4203 | { | |
4204 | struct se_device *dev = SE_DEV(cmd); | |
4205 | ||
4206 | T_TASK(cmd)->t_tasks_sectors = | |
4207 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | |
4208 | if (!(T_TASK(cmd)->t_tasks_sectors)) | |
4209 | T_TASK(cmd)->t_tasks_sectors = 1; | |
4210 | ||
4211 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | |
4212 | return 0; | |
4213 | ||
4214 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | |
4215 | transport_dev_end_lba(dev)) { | |
4216 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | |
4217 | " transport_dev_end_lba(): %llu\n", | |
4218 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | |
4219 | transport_dev_end_lba(dev)); | |
4220 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4221 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
4222 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; | |
4223 | } | |
4224 | ||
4225 | return 0; | |
4226 | } | |
4227 | ||
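| /* | |
| * Build the struct se_task list for this command. Data SG_IO CDBs | |
| * are split into per-task CDBs by LBA and sector count, with BIDI | |
| * READ tasks queued first for the non-pSCSI passthrough case. | |
| */ | |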
4228 | static int transport_new_cmd_obj(struct se_cmd *cmd) | |
4229 | { | |
4230 | struct se_device *dev = SE_DEV(cmd); | |
4231 | u32 task_cdbs = 0, rc; | |
4232 | ||
4233 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | |
4234 | task_cdbs++; | |
4235 | T_TASK(cmd)->t_task_cdbs++; | |
4236 | } else { | |
4237 | int set_counts = 1; | |
4238 | ||
4239 | /* | |
4240 | * Setup any BIDI READ tasks and memory from | |
4241 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | |
4242 | * are queued first for the non pSCSI passthrough case. | |
4243 | */ | |
4244 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | |
4245 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
4246 | rc = transport_generic_get_cdb_count(cmd, | |
4247 | T_TASK(cmd)->t_task_lba, | |
4248 | T_TASK(cmd)->t_tasks_sectors, | |
4249 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | |
4250 | set_counts); | |
4251 | if (!(rc)) { | |
4252 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4253 | cmd->scsi_sense_reason = | |
4254 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4255 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4256 | } | |
4257 | set_counts = 0; | |
4258 | } | |
4259 | /* | |
4260 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | |
4261 | * Note for BIDI transfers this will contain the WRITE payload | |
4262 | */ | |
4263 | task_cdbs = transport_generic_get_cdb_count(cmd, | |
4264 | T_TASK(cmd)->t_task_lba, | |
4265 | T_TASK(cmd)->t_tasks_sectors, | |
4266 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | |
4267 | set_counts); | |
4268 | if (!(task_cdbs)) { | |
4269 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4270 | cmd->scsi_sense_reason = | |
4271 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4272 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4273 | } | |
4274 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | |
4275 | ||
4276 | #if 0 | |
4277 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | |
4278 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, | |
4279 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | |
4280 | T_TASK(cmd)->t_task_cdbs); | |
4281 | #endif | |
4282 | } | |
4283 | ||
4284 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | |
4285 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | |
4286 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | |
4287 | return 0; | |
4288 | } | |
4289 | ||
4290 | static struct list_head *transport_init_se_mem_list(void) | |
4291 | { | |
4292 | struct list_head *se_mem_list; | |
4293 | ||
4294 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); | |
4295 | if (!(se_mem_list)) { | |
4296 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); | |
4297 | return NULL; | |
4298 | } | |
4299 | INIT_LIST_HEAD(se_mem_list); | |
4300 | ||
4301 | return se_mem_list; | |
4302 | } | |
4303 | ||
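| /* | |
| * Allocate the internal struct se_mem list backing a command's data | |
| * buffer, one zeroed order-zero page per element of at most | |
| * dma_size bytes. | |
| */ | |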
4304 | static int | |
4305 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |
4306 | { | |
4307 | unsigned char *buf; | |
4308 | struct se_mem *se_mem; | |
4309 | ||
4310 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | |
4311 | if (!(T_TASK(cmd)->t_mem_list)) | |
4312 | return -ENOMEM; | |
4313 | ||
4314 | /* | |
4315 | * If the device uses memory mapping this is enough. | |
4316 | */ | |
4317 | if (cmd->se_dev->transport->do_se_mem_map) | |
4318 | return 0; | |
4319 | ||
4320 | /* | |
4321 | * Setup BIDI-COMMAND READ list of struct se_mem elements | |
4322 | */ | |
4323 | if (T_TASK(cmd)->t_tasks_bidi) { | |
4324 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | |
4325 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | |
4326 | kfree(T_TASK(cmd)->t_mem_list); | |
4327 | return -ENOMEM; | |
4328 | } | |
4329 | } | |
4330 | ||
4331 | while (length) { | |
4332 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | |
4333 | if (!(se_mem)) { | |
4334 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
4335 | goto out; | |
4336 | } | |
4337 | INIT_LIST_HEAD(&se_mem->se_list); | |
4338 | se_mem->se_len = (length > dma_size) ? dma_size : length; | |
4339 | ||
4340 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | |
4341 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | |
4342 | if (!(se_mem->se_page)) { | |
4343 | printk(KERN_ERR "alloc_pages() failed\n"); | |
4344 | goto out; | |
4345 | } | |
4346 | ||
4347 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); | |
4348 | if (!(buf)) { | |
4349 | printk(KERN_ERR "kmap_atomic() failed\n"); | |
4350 | goto out; | |
4351 | } | |
4352 | memset(buf, 0, se_mem->se_len); | |
4353 | kunmap_atomic(buf, KM_IRQ0); | |
4354 | ||
4355 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | |
4356 | T_TASK(cmd)->t_tasks_se_num++; | |
4357 | ||
4358 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | |
4359 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | |
4360 | se_mem->se_off); | |
4361 | ||
4362 | length -= se_mem->se_len; | |
4363 | } | |
4364 | ||
4365 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | |
4366 | T_TASK(cmd)->t_tasks_se_num); | |
4367 | ||
4368 | return 0; | |
4369 | out: | |
4370 | return -1; | |
4371 | } | |
4372 | ||
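| /* | |
| * Walk the command's se_mem list to count the scatterlist entries | |
| * this task requires, then allocate and initialize task->task_sg[] | |
| * (plus one padding entry when the fabric requests SG chaining). | |
| */ | |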
4373 | extern u32 transport_calc_sg_num( | |
4374 | struct se_task *task, | |
4375 | struct se_mem *in_se_mem, | |
4376 | u32 task_offset) | |
4377 | { | |
4378 | struct se_cmd *se_cmd = task->task_se_cmd; | |
4379 | struct se_device *se_dev = SE_DEV(se_cmd); | |
4380 | struct se_mem *se_mem = in_se_mem; | |
4381 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | |
4382 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | |
4383 | ||
4384 | while (task_size != 0) { | |
4385 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | |
4386 | " se_mem->se_off(%u) task_offset(%u)\n", | |
4387 | se_mem->se_page, se_mem->se_len, | |
4388 | se_mem->se_off, task_offset); | |
4389 | ||
4390 | if (task_offset == 0) { | |
4391 | if (task_size >= se_mem->se_len) { | |
4392 | sg_length = se_mem->se_len; | |
4393 | ||
4394 | if (!(list_is_last(&se_mem->se_list, | |
4395 | T_TASK(se_cmd)->t_mem_list))) | |
4396 | se_mem = list_entry(se_mem->se_list.next, | |
4397 | struct se_mem, se_list); | |
4398 | } else { | |
4399 | sg_length = task_size; | |
4400 | task_size -= sg_length; | |
4401 | goto next; | |
4402 | } | |
4403 | ||
4404 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4405 | sg_length, task_size); | |
4406 | } else { | |
4407 | if ((se_mem->se_len - task_offset) > task_size) { | |
4408 | sg_length = task_size; | |
4409 | task_size -= sg_length; | |
4410 | goto next; | |
4411 | } else { | |
4412 | sg_length = (se_mem->se_len - task_offset); | |
4413 | ||
4414 | if (!(list_is_last(&se_mem->se_list, | |
4415 | T_TASK(se_cmd)->t_mem_list))) | |
4416 | se_mem = list_entry(se_mem->se_list.next, | |
4417 | struct se_mem, se_list); | |
4418 | } | |
4419 | ||
4420 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4421 | sg_length, task_size); | |
4422 | ||
4423 | task_offset = 0; | |
4424 | } | |
4425 | task_size -= sg_length; | |
4426 | next: | |
4427 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | |
4428 | task->task_no, task_size); | |
4429 | ||
4430 | task->task_sg_num++; | |
4431 | } | |
4432 | /* | |
4433 | * Check if the fabric module driver is requesting that all | |
4434 | * struct se_task->task_sg[] be chained together. If so, | |
4435 | * then allocate an extra padding SG entry for linking and | |
4436 | * marking the end of the chained SGL. | |
4437 | */ | |
4438 | if (tfo->task_sg_chaining) { | |
4439 | task_sg_num_padded = (task->task_sg_num + 1); | |
4440 | task->task_padded_sg = 1; | |
4441 | } else | |
4442 | task_sg_num_padded = task->task_sg_num; | |
4443 | ||
4444 | task->task_sg = kzalloc(task_sg_num_padded * | |
4445 | sizeof(struct scatterlist), GFP_KERNEL); | |
4446 | if (!(task->task_sg)) { | |
4447 | printk(KERN_ERR "Unable to allocate memory for" | |
4448 | " task->task_sg\n"); | |
4449 | return 0; | |
4450 | } | |
4451 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | |
4452 | /* | |
4453 | * Setup task->task_sg_bidi for SCSI READ payload for | |
4454 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | |
4455 | */ | |
4456 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | |
4457 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
4458 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | |
4459 | sizeof(struct scatterlist), GFP_KERNEL); | |
4460 | if (!(task->task_sg_bidi)) { | |
4461 | printk(KERN_ERR "Unable to allocate memory for" | |
4462 | " task->task_sg_bidi\n"); | |
4463 | return 0; | |
4464 | } | |
4465 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | |
4466 | } | |
4467 | /* | |
4468 | * For the chaining case, setup the proper end of SGL for the | |
4469 | * initial submission struct task into struct se_subsystem_api. | |
4470 | * This will be cleared later by transport_do_task_sg_chain() | |
4471 | */ | |
4472 | if (task->task_padded_sg) { | |
4473 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | |
4474 | /* | |
4475 | * Only mark the end of the bi-directional scatterlist when it | |
4476 | * was actually created, i.e. for a bi-directional (RD + WR) | |
4477 | * request. | |
4478 | */ | |
4479 | if (task->task_sg_bidi) | |
4480 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | |
4481 | } | |
4482 | ||
4483 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | |
4484 | " task_sg_num_padded(%u)\n", task->task_sg_num, | |
4485 | task_sg_num_padded); | |
4486 | ||
4487 | return task->task_sg_num; | |
4488 | } | |
4489 | ||
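| /* | |
| * Clamp the per-task sector count to the device's max_sectors | |
| * attribute; for TYPE_DISK the range is also truncated at the end | |
| * of the device. *max_sectors_set tells the caller the task was capped. | |
| */ | |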
4490 | static inline int transport_set_tasks_sectors_disk( | |
4491 | struct se_task *task, | |
4492 | struct se_device *dev, | |
4493 | unsigned long long lba, | |
4494 | u32 sectors, | |
4495 | int *max_sectors_set) | |
4496 | { | |
4497 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | |
4498 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
4499 | ||
4500 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4501 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4502 | *max_sectors_set = 1; | |
4503 | } | |
4504 | } else { | |
4505 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4506 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4507 | *max_sectors_set = 1; | |
4508 | } else | |
4509 | task->task_sectors = sectors; | |
4510 | } | |
4511 | ||
4512 | return 0; | |
4513 | } | |
4514 | ||
4515 | static inline int transport_set_tasks_sectors_non_disk( | |
4516 | struct se_task *task, | |
4517 | struct se_device *dev, | |
4518 | unsigned long long lba, | |
4519 | u32 sectors, | |
4520 | int *max_sectors_set) | |
4521 | { | |
4522 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | |
4523 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | |
4524 | *max_sectors_set = 1; | |
4525 | } else | |
4526 | task->task_sectors = sectors; | |
4527 | ||
4528 | return 0; | |
4529 | } | |
4530 | ||
4531 | static inline int transport_set_tasks_sectors( | |
4532 | struct se_task *task, | |
4533 | struct se_device *dev, | |
4534 | unsigned long long lba, | |
4535 | u32 sectors, | |
4536 | int *max_sectors_set) | |
4537 | { | |
4538 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | |
4539 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | |
4540 | max_sectors_set) : | |
4541 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | |
4542 | max_sectors_set); | |
4543 | } | |
4544 | ||
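| /* | |
| * Translate a fabric-provided struct scatterlist array into TCM's | |
| * internal struct se_mem list without copying data; each se_mem | |
| * references the corresponding SG page, offset and length. | |
| */ | |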
4545 | static int transport_map_sg_to_mem( | |
4546 | struct se_cmd *cmd, | |
4547 | struct list_head *se_mem_list, | |
4548 | void *in_mem, | |
4549 | u32 *se_mem_cnt) | |
4550 | { | |
4551 | struct se_mem *se_mem; | |
4552 | struct scatterlist *sg; | |
4553 | u32 sg_count = 1, cmd_size = cmd->data_length; | |
4554 | ||
4555 | if (!in_mem) { | |
4556 | printk(KERN_ERR "No source scatterlist\n"); | |
4557 | return -1; | |
4558 | } | |
4559 | sg = (struct scatterlist *)in_mem; | |
4560 | ||
4561 | while (cmd_size) { | |
4562 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | |
4563 | if (!(se_mem)) { | |
4564 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
4565 | return -1; | |
4566 | } | |
4567 | INIT_LIST_HEAD(&se_mem->se_list); | |
4568 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | |
4569 | " sg_page: %p offset: %d length: %d\n", cmd_size, | |
4570 | sg_page(sg), sg->offset, sg->length); | |
4571 | ||
4572 | se_mem->se_page = sg_page(sg); | |
4573 | se_mem->se_off = sg->offset; | |
4574 | ||
4575 | if (cmd_size > sg->length) { | |
4576 | se_mem->se_len = sg->length; | |
4577 | sg = sg_next(sg); | |
4578 | sg_count++; | |
4579 | } else | |
4580 | se_mem->se_len = cmd_size; | |
4581 | ||
4582 | cmd_size -= se_mem->se_len; | |
4583 | ||
4584 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", | |
4585 | *se_mem_cnt, cmd_size); | |
4586 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", | |
4587 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | |
4588 | ||
4589 | list_add_tail(&se_mem->se_list, se_mem_list); | |
4590 | (*se_mem_cnt)++; | |
4591 | } | |
4592 | ||
4593 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" | |
4594 | " struct se_mem\n", sg_count, *se_mem_cnt); | |
4595 | ||
4596 | if (sg_count != *se_mem_cnt) | |
4597 | BUG(); | |
4598 | ||
4599 | return 0; | |
4600 | } | |
4601 | ||
4602 | /* transport_map_mem_to_sg(): | |
4603 | * | |
4604 | * Map a range of the command's se_mem list into this task's scatterlist. | |
4605 | */ | |
4606 | int transport_map_mem_to_sg( | |
4607 | struct se_task *task, | |
4608 | struct list_head *se_mem_list, | |
4609 | void *in_mem, | |
4610 | struct se_mem *in_se_mem, | |
4611 | struct se_mem **out_se_mem, | |
4612 | u32 *se_mem_cnt, | |
4613 | u32 *task_offset) | |
4614 | { | |
4615 | struct se_cmd *se_cmd = task->task_se_cmd; | |
4616 | struct se_mem *se_mem = in_se_mem; | |
4617 | struct scatterlist *sg = (struct scatterlist *)in_mem; | |
4618 | u32 task_size = task->task_size, sg_no = 0; | |
4619 | ||
4620 | if (!sg) { | |
4621 | printk(KERN_ERR "Unable to locate valid struct" | |
4622 | " scatterlist pointer\n"); | |
4623 | return -1; | |
4624 | } | |
4625 | ||
4626 | while (task_size != 0) { | |
4627 | /* | |
4628 | * Setup the contiguous array of scatterlists for | |
4629 | * this struct se_task. | |
4630 | */ | |
4631 | sg_assign_page(sg, se_mem->se_page); | |
4632 | ||
4633 | if (*task_offset == 0) { | |
4634 | sg->offset = se_mem->se_off; | |
4635 | ||
4636 | if (task_size >= se_mem->se_len) { | |
4637 | sg->length = se_mem->se_len; | |
4638 | ||
4639 | if (!(list_is_last(&se_mem->se_list, | |
4640 | T_TASK(se_cmd)->t_mem_list))) { | |
4641 | se_mem = list_entry(se_mem->se_list.next, | |
4642 | struct se_mem, se_list); | |
4643 | (*se_mem_cnt)++; | |
4644 | } | |
4645 | } else { | |
4646 | sg->length = task_size; | |
4647 | /* | |
4648 | * Determine if we need to calculate an offset | |
4649 | * into the struct se_mem on the next go around.. | |
4650 | */ | |
4651 | task_size -= sg->length; | |
4652 | if (!(task_size)) | |
4653 | *task_offset = sg->length; | |
4654 | ||
4655 | goto next; | |
4656 | } | |
4657 | ||
4658 | } else { | |
4659 | sg->offset = (*task_offset + se_mem->se_off); | |
4660 | ||
4661 | if ((se_mem->se_len - *task_offset) > task_size) { | |
4662 | sg->length = task_size; | |
4663 | /* | |
4664 | * Determine if we need to calculate an offset | |
4665 | * into the struct se_mem on the next go around.. | |
4666 | */ | |
4667 | task_size -= sg->length; | |
4668 | if (!(task_size)) | |
4669 | *task_offset += sg->length; | |
4670 | ||
4671 | goto next; | |
4672 | } else { | |
4673 | sg->length = (se_mem->se_len - *task_offset); | |
4674 | ||
4675 | if (!(list_is_last(&se_mem->se_list, | |
4676 | T_TASK(se_cmd)->t_mem_list))) { | |
4677 | se_mem = list_entry(se_mem->se_list.next, | |
4678 | struct se_mem, se_list); | |
4679 | (*se_mem_cnt)++; | |
4680 | } | |
4681 | } | |
4682 | ||
4683 | *task_offset = 0; | |
4684 | } | |
4685 | task_size -= sg->length; | |
4686 | next: | |
4687 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | |
4688 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | |
4689 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | |
4690 | ||
4691 | sg_no++; | |
4692 | if (!(task_size)) | |
4693 | break; | |
4694 | ||
4695 | sg = sg_next(sg); | |
4696 | ||
4697 | if (task_size > se_cmd->data_length) | |
4698 | BUG(); | |
4699 | } | |
4700 | *out_se_mem = se_mem; | |
4701 | ||
4702 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | |
4703 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | |
4704 | ||
4705 | return 0; | |
4706 | } | |
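
/*
 * Illustrative sketch of the *task_offset bookkeeping above
 * (hypothetical sizes): a single 8k struct se_mem shared by two 4k
 * struct se_task's is consumed in two calls. The first call maps
 * bytes 0-4095, exhausts its task_size and leaves *task_offset = 4096.
 * The second call then starts its scatterlist entry at
 * (se_mem->se_off + 4096), maps the remaining 4096 bytes, advances to
 * the next struct se_mem in the list and resets *task_offset to 0.
 */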

/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
	struct se_task *task;
	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
	u32 task_sg_num = 0, sg_count = 0;
	int i;

	if (tfo->task_sg_chaining == 0) {
		printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
				" %s\n", tfo->get_fabric_name());
		dump_stack();
		return;
	}
	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (!(task->task_sg) || !(task->task_padded_sg))
			continue;

		if (sg_head && sg_link) {
			sg_head_cur = &task->task_sg[0];
			sg_link_cur = &task->task_sg[task->task_sg_num];
			/*
			 * Either add chain or mark end of scatterlist
			 */
			if (!(list_is_last(&task->t_list,
					&T_TASK(cmd)->t_task_list))) {
				/*
				 * Clear existing SGL termination bit set in
				 * transport_calc_sg_num(), see sg_mark_end()
				 */
				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
				sg_end_cur->page_link &= ~0x02;

				sg_chain(sg_head, task_sg_num, sg_head_cur);
				sg_count += (task->task_sg_num + 1);
			} else
				sg_count += task->task_sg_num;

			sg_head = sg_head_cur;
			sg_link = sg_link_cur;
			task_sg_num = task->task_sg_num;
			continue;
		}
		sg_head = sg_first = &task->task_sg[0];
		sg_link = &task->task_sg[task->task_sg_num];
		task_sg_num = task->task_sg_num;
		/*
		 * Check for single task..
		 */
		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
			/*
			 * Clear existing SGL termination bit set in
			 * transport_calc_sg_num(), see sg_mark_end()
			 */
			sg_end = &task->task_sg[task->task_sg_num - 1];
			sg_end->page_link &= ~0x02;
			sg_count += (task->task_sg_num + 1);
		} else
			sg_count += task->task_sg_num;
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;

	DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
		T_TASK(cmd)->t_tasks_sg_chained_no);

	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
			T_TASK(cmd)->t_tasks_sg_chained_no, i) {

		DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
			sg, sg_page(sg), sg->length, sg->offset);
		if (sg_is_chain(sg))
			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
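
/*
 * Illustrative usage sketch for a HW target mode fabric driver
 * (hypothetical code; hw_fill_descriptor() stands in for the driver's
 * real DMA descriptor setup). With ->task_sg_chaining enabled in its
 * struct target_core_fabric_ops, the driver can walk the single
 * chained SGL produced above instead of each task->task_sg[]:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	transport_do_task_sg_chain(se_cmd);
 *	for_each_sg(T_TASK(se_cmd)->t_tasks_sg_chained, sg,
 *		    T_TASK(se_cmd)->t_tasks_sg_chained_no, i)
 *		hw_fill_descriptor(sg_page(sg), sg->offset, sg->length);
 */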

static int transport_do_se_mem_map(
	struct se_device *dev,
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	u32 task_offset = *task_offset_in;
	int ret = 0;
	/*
	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
	 * has been done by the transport plugin.
	 */
	if (TRANSPORT(dev)->do_se_mem_map) {
		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
		if (ret == 0)
			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;

		return ret;
	}
	/*
	 * This is the normal path for all non-BIDI commands and for
	 * BIDI-COMMAND WRITE payloads. If we need to do BIDI READ passthrough
	 * for TCM/pSCSI, the first call to transport_do_se_mem_map() ->
	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
	 * allocation for task->task_sg_bidi, and the subsequent call to
	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
	 * will perform the extra BIDI READ mapping below.
	 */
	if (!(task->task_sg_bidi)) {
		/*
		 * Assume default that transport plugin speaks preallocated
		 * scatterlists.
		 */
		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
			return -1;
		/*
		 * struct se_task->task_sg now contains the struct scatterlist array.
		 */
		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
					in_se_mem, out_se_mem, se_mem_cnt,
					task_offset_in);
	}
	/*
	 * Handle the se_mem_list -> struct task->task_sg_bidi
	 * memory map for the extra BIDI READ payload
	 */
	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
				in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
}

static u32 transport_generic_get_cdb_count(
	struct se_cmd *cmd,
	unsigned long long lba,
	u32 sectors,
	enum dma_data_direction data_direction,
	struct list_head *mem_list,
	int set_counts)
{
	unsigned char *cdb = NULL;
	struct se_task *task;
	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
	struct se_device *dev = SE_DEV(cmd);
	int max_sectors_set = 0, ret;
	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;

	if (!mem_list) {
		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
				"_cdb_count()\n");
		return 0;
	}
	/*
	 * Using RAMDISK_DR backstores is the only case where mem_list
	 * will ever be empty at this point.
	 */
	if (!(list_empty(mem_list)))
		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
	/*
	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
	 */
	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
					struct se_mem, se_list);

	while (sectors) {
		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
			transport_dev_end_lba(dev));

		task = transport_generic_get_task(cmd, data_direction);
		if (!(task))
			goto out;

		transport_set_tasks_sectors(task, dev, lba, sectors,
				&max_sectors_set);

		task->task_lba = lba;
		lba += task->task_sectors;
		sectors -= task->task_sectors;
		task->task_size = (task->task_sectors *
					DEV_ATTRIB(dev)->block_size);

		cdb = TRANSPORT(dev)->get_cdb(task);
		if ((cdb)) {
			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
				scsi_command_size(T_TASK(cmd)->t_task_cdb));
			cmd->transport_split_cdb(task->task_lba,
					&task->task_sectors, cdb);
		}

		/*
		 * Perform the SE OBJ plugin and/or Transport plugin specific
		 * mapping for T_TASK(cmd)->t_mem_list, and setup the
		 * task->task_sg and, if necessary, task->task_sg_bidi.
		 */
		ret = transport_do_se_mem_map(dev, task, mem_list,
				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
				&task_offset_in);
		if (ret < 0)
			goto out;

		se_mem = se_mem_lout;
		/*
		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
		 *
		 * Note that the first call to transport_do_se_mem_map() above will
		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
		 * -> transport_calc_sg_num(), and the second here will do the
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
		 */
		if (task->task_sg_bidi != NULL) {
			ret = transport_do_se_mem_map(dev, task,
				T_TASK(cmd)->t_mem_bidi_list, NULL,
				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
				&task_offset_in);
			if (ret < 0)
				goto out;

			se_mem_bidi = se_mem_bidi_lout;
		}
		task_cdbs++;

		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
				task_cdbs, task->task_sg_num);

		if (max_sectors_set) {
			max_sectors_set = 0;
			continue;
		}

		if (!sectors)
			break;
	}

	if (set_counts) {
		atomic_inc(&T_TASK(cmd)->t_fe_count);
		atomic_inc(&T_TASK(cmd)->t_se_count);
	}

	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);

	return task_cdbs;
out:
	return 0;
}
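
/*
 * Worked example of the task splitting above (illustrative numbers
 * only, assuming transport_set_tasks_sectors() caps each task at the
 * device's max_sectors attribute): with max_sectors = 1024 and
 * block_size = 512, a 3000 sector WRITE starting at lba 0 becomes
 * three tasks of 1024, 1024 and 952 sectors at lbas 0, 1024 and 2048,
 * i.e. task_cdbs = 3, and cmd->transport_split_cdb() rewrites each
 * task's copied CDB for its own task_lba/task_sectors window.
 */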

static int
transport_map_control_cmd_to_task(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	unsigned char *cdb;
	struct se_task *task;
	int ret;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	cdb = TRANSPORT(dev)->get_cdb(task);
	if (cdb)
		memcpy(cdb, cmd->t_task->t_task_cdb,
			scsi_command_size(cmd->t_task->t_task_cdb));

	task->task_size = cmd->data_length;
	task->task_sg_num =
		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;

	atomic_inc(&cmd->t_task->t_fe_count);
	atomic_inc(&cmd->t_task->t_se_count);

	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
		u32 se_mem_cnt = 0, task_offset = 0;

		BUG_ON(list_empty(cmd->t_task->t_mem_list));

		ret = transport_do_se_mem_map(dev, task,
				cmd->t_task->t_mem_list, NULL, se_mem,
				&se_mem_lout, &se_mem_cnt, &task_offset);
		if (ret < 0)
			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

		if (dev->transport->map_task_SG)
			return dev->transport->map_task_SG(task);
		return 0;
	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
		if (dev->transport->map_task_non_SG)
			return dev->transport->map_task_non_SG(task);
		return 0;
	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
		if (dev->transport->cdb_none)
			return dev->transport->cdb_none(task);
		return 0;
	} else {
		BUG();
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	}
}

/*	transport_generic_new_cmd(): Called from transport_processing_thread()
 *
 *	Allocate storage transport resources from a set of values predefined
 *	by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
 *	Any non-zero return here is treated as an "out of resources" failure.
 */
/*
 * Generate struct se_task(s) and/or their payloads for this CDB.
 */
static int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_portal_group *se_tpg;
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * to setup beforehand the linked list of physical memory at
	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
		ret = transport_allocate_resources(cmd);
		if (ret < 0)
			return ret;
	}

	ret = transport_get_sectors(cmd);
	if (ret < 0)
		return ret;

	ret = transport_new_cmd_obj(cmd);
	if (ret < 0)
		return ret;

	/*
	 * Determine if the calling TCM fabric module is talking to
	 * Linux/NET via kernel sockets and needs to allocate a
	 * struct iovec array to complete the struct se_cmd
	 */
	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
		if (ret < 0)
			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
			if (atomic_read(&task->task_sent))
				continue;
			if (!dev->transport->map_task_SG)
				continue;

			ret = dev->transport->map_task_SG(task);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = transport_map_control_cmd_to_task(cmd);
		if (ret < 0)
			return ret;
	}

	/*
	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (i.e. it gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;
}
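
/*
 * Summary sketch of the resulting flow for the two data directions
 * (derived from the code above): READs and control CDBs are queued
 * for execution immediately, while a WRITE first round-trips through
 * the fabric to fetch its payload:
 *
 *	transport_generic_new_cmd()
 *	  -> transport_generic_write_pending()
 *	       -> CMD_TFO(cmd)->write_pending(cmd)
 *	  ... fabric receives the WRITE data ...
 *	  -> transport_generic_process_write()
 *	       -> transport_execute_tasks(cmd)
 */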

/*	transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
#if 0
	/*
	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
	 * original EDTL
	 */
	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (!T_TASK(cmd)->t_tasks_se_num) {
			unsigned char *dst, *buf =
				(unsigned char *)T_TASK(cmd)->t_task_buf;

			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
			if (!(dst)) {
				printk(KERN_ERR "Unable to allocate memory for"
						" WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}
			memcpy(dst, buf, cmd->cmd_spdtl);

			kfree(T_TASK(cmd)->t_task_buf);
			T_TASK(cmd)->t_task_buf = dst;
		} else {
			struct scatterlist *sg =
				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
			struct scatterlist *orig_sg;

			orig_sg = kzalloc(sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num,
					GFP_KERNEL);
			if (!(orig_sg)) {
				printk(KERN_ERR "Unable to allocate memory"
						" for WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}

			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
					sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num);

			cmd->data_length = cmd->cmd_spdtl;
			/*
			 * FIXME, clear out original struct se_task and state
			 * information.
			 */
			if (transport_generic_new_cmd(cmd) < 0) {
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				kfree(orig_sg);
				return;
			}

			transport_memcpy_write_sg(cmd, orig_sg);
		}
	}
#endif
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);

/*	transport_generic_write_pending():
 *
 *
 */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
	 * from the passed Linux/SCSI struct scatterlist located at
	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
	 * T_TASK(se_cmd)->t_task_buf.
	 */
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
		transport_memcpy_read_contig(cmd,
				T_TASK(cmd)->t_task_buf,
				T_TASK(cmd)->t_task_pt_sgl);
	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
	 * can be called from HW target mode interrupt code. This is safe
	 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = CMD_TFO(cmd)->write_pending(cmd);
	if (ret < 0)
		return ret;

	return PYX_TRANSPORT_WRITE_PENDING;
}
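
/*
 * Minimal sketch of a fabric module's ->write_pending() callback
 * (hypothetical code, not a real fabric; demo_fabric_post_r2t() is an
 * invented helper): the fabric signals the initiator that it may send
 * the WRITE payload, and later calls transport_generic_handle_data()
 * once the data has landed in the buffers prepared above. A negative
 * return propagates straight out of transport_generic_write_pending().
 *
 *	static int demo_fabric_write_pending(struct se_cmd *se_cmd)
 *	{
 *		demo_fabric_post_r2t(se_cmd);
 *		return 0;
 *	}
 */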

/*	transport_release_cmd_to_pool():
 *
 *
 */
void transport_release_cmd_to_pool(struct se_cmd *cmd)
{
	BUG_ON(!T_TASK(cmd));
	BUG_ON(!CMD_TFO(cmd));

	transport_free_se_cmd(cmd);
	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
}
EXPORT_SYMBOL(transport_release_cmd_to_pool);

/*	transport_generic_free_cmd():
 *
 *	Called from processing frontend to release storage engine resources
 */
void transport_generic_free_cmd(
	struct se_cmd *cmd,
	int wait_for_tasks,
	int release_to_pool,
	int session_reinstatement)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
		transport_release_cmd_to_pool(cmd);
	else {
		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (SE_LUN(cmd)) {
#if 0
			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
				" SE_LUN(cmd)\n", cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
#endif
			transport_lun_remove_cmd(cmd);
		}

		if (wait_for_tasks && cmd->transport_wait_for_tasks)
			cmd->transport_wait_for_tasks(cmd, 0, 0);

		transport_generic_remove(cmd, release_to_pool,
				session_reinstatement);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);

static void transport_nop_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	return;
}

/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -1;
	}
	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
	if (!ret) {
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	return 0;
}

/* #define DEBUG_CLEAR_LUN */
#ifdef DEBUG_CLEAR_LUN
#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
#else
#define DEBUG_CLEAR_L(x...)
#endif

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty_careful(&lun->lun_cmd_list)) {
		cmd = list_entry(lun->lun_cmd_list.next,
			struct se_cmd, se_lun_list);
		list_del(&cmd->se_lun_list);

		if (!(T_TASK(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
				"[i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&T_TASK(cmd)->t_state_lock);
		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
			"_lun_stop for ITT: 0x%08x\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
		spin_unlock(&T_TASK(cmd)->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!(SE_LUN(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
			goto check_cond;
		}
		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node. Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that the LUN has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, CMD_TFO(cmd)->get_task_tag(cmd));

			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = (struct se_lun *)p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		printk(KERN_ERR "Unable to start clear_lun thread\n");
		return -1;
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
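
/*
 * Illustrative caller sketch (hypothetical ConfigFS shutdown code):
 * the LUN release path blocks here until every struct se_cmd still
 * referencing the LUN has been quiesced by the tcm_cl_%u kthread:
 *
 *	if (transport_clear_lun_from_sessions(lun) < 0)
 *		printk(KERN_ERR "clear_lun kthread failed\n");
 *
 * Only after this returns, once lun->lun_shutdown_comp has been
 * completed, is it safe to tear down the struct se_lun itself.
 */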

/*	transport_generic_wait_for_tasks():
 *
 *	Called from frontend or passthrough context to wait for storage engine
 *	to pause and/or release frontend generated struct se_cmd.
 */
static void transport_generic_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	unsigned long flags;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * If we are already stopped due to an external event (i.e. LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {

		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and complete transport_lun_stop_comp just to
		 * be sure here.
		 */
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));

		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
	}
	if (!atomic_read(&T_TASK(cmd)->t_transport_active))
		goto remove;

	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
		cmd->deferred_t_state);

	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
		CMD_TFO(cmd)->get_task_tag(cmd));
remove:
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	if (!remove_cmd)
		return;

	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
}

static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}

int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2. SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	CMD_TFO(cmd)->queue_status(cmd);
	return 0;
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
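
/*
 * Resulting fixed format sense data layout (per SPC-3; shown here for
 * the TCM_WRITE_PROTECTED case above as an illustration):
 *
 *	buffer[offset + 0]                    = 0x70 (current error)
 *	buffer[offset + SPC_SENSE_KEY_OFFSET] = DATA_PROTECT
 *	buffer[offset + SPC_ASC_KEY_OFFSET]   = 0x27 (WRITE PROTECTED)
 *
 * where offset is whatever the fabric returned from
 * set_fabric_sense_len(), and the total sense length reported back to
 * the initiator is TRANSPORT_SENSE_BUFFER + offset.
 */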

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
		if (!(send_status) ||
		    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			T_TASK(cmd)->t_task_cdb[0],
			CMD_TFO(cmd)->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		CMD_TFO(cmd)->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response. This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
			smp_mb__after_atomic_inc();
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			transport_new_cmd_failure(cmd);
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
		CMD_TFO(cmd)->get_task_tag(cmd));
#endif
	CMD_TFO(cmd)->queue_status(cmd);
}

/*	transport_generic_do_tmr():
 *
 *
 */
int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_cmd *ref_cmd;
	struct se_device *dev = SE_DEV(cmd);
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case ABORT_TASK:
		ref_cmd = tmr->ref_cmd;
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case ABORT_TASK_SET:
	case CLEAR_ACA:
	case CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
#if 0
	case TARGET_WARM_RESET:
		transport_generic_host_reset(dev->se_hba);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TARGET_COLD_RESET:
		transport_generic_host_reset(dev->se_hba);
		transport_generic_cold_reset(dev->se_hba);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
#endif
	default:
		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	CMD_TFO(cmd)->queue_tm_rsp(cmd);

	transport_cmd_check_stop(cmd, 2, 0);
	return 0;
}
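
/*
 * Summary of the dispatch above: LUN_RESET is the only TMR function
 * currently carried out, completing as TMR_FUNCTION_COMPLETE when
 * core_tmr_lun_reset() succeeds; ABORT_TASK answers
 * TMR_FUNCTION_REJECTED, and ABORT_TASK_SET/CLEAR_ACA/CLEAR_TASK_SET
 * answer TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED, all before
 * queue_tm_rsp() hands the response back to the fabric module.
 */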

/*
 * Called with spin_lock_irq(&dev->execute_task_lock); held
 *
 */
static struct se_task *
transport_get_task_from_state_list(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->state_task_list))
		return NULL;

	list_for_each_entry(task, &dev->state_task_list, t_state_list)
		break;

	list_del(&task->t_state_list);
	atomic_set(&task->task_state_active, 0);

	return task;
}

static void transport_processing_shutdown(struct se_device *dev)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	struct se_task *task;
	u8 state;
	unsigned long flags;
	/*
	 * Empty the struct se_device's struct se_task state list.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	while ((task = transport_get_task_from_state_list(dev))) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
			" %d/%d cdb: 0x%02x\n", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
			cmd->t_state, cmd->deferred_t_state,
			T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
				" %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
				task, dev);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		}
		__transport_stop_task_timer(task, &flags);

		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
				" %p\n", task, dev);

			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);
				transport_send_check_condition_and_sense(
					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
					0);
				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);
				transport_cmd_check_stop(cmd, 1, 0);
			} else {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);

				if (transport_cmd_check_stop(cmd, 1, 0))
					transport_generic_remove(cmd, 0, 0);
			}

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
			task, dev);

		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);
			transport_lun_remove_cmd(cmd);

			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Empty the struct se_device's struct se_cmd list.
	 */
	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
		spin_unlock_irqrestore(
			&dev->dev_queue_obj->cmd_queue_lock, flags);
		cmd = (struct se_cmd *)qr->cmd;
		state = qr->state;
		kfree(qr);

		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
			cmd, state);

		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			transport_lun_remove_cmd(cmd);
			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}
		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}

/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret, t_state;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;
	struct se_queue_req *qr;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

		spin_lock_irq(&dev->dev_status_lock);
		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
			spin_unlock_irq(&dev->dev_status_lock);
			transport_processing_shutdown(dev);
			continue;
		}
		spin_unlock_irq(&dev->dev_status_lock);

get_cmd:
		__transport_execute_tasks(dev);

		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
		if (!(qr))
			continue;

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		kfree(qr);

		switch (t_state) {
		case TRANSPORT_NEW_CMD_MAP:
			if (!(CMD_TFO(cmd)->new_cmd_map)) {
				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
				break;
			}
			/* Fall through */
		case TRANSPORT_NEW_CMD:
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
					0, (cmd->data_direction !=
					    DMA_TO_DEVICE));
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_COMPLETE_OK:
			transport_stop_all_task_timers(cmd);
			transport_generic_complete_ok(cmd);
			break;
		case TRANSPORT_REMOVE:
			transport_generic_remove(cmd, 1, 0);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_FAILURE:
			transport_generic_request_failure(cmd, NULL, 1, 1);
			break;
		case TRANSPORT_COMPLETE_TIMEOUT:
			transport_stop_all_task_timers(cmd);
			transport_generic_request_timeout(cmd);
			break;
		default:
			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
				" %u\n", t_state, cmd->deferred_t_state,
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd),
				SE_LUN(cmd)->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	transport_release_all_cmds(dev);
	dev->process_thread = NULL;
	return 0;
}