/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <asm/unaligned.h>

#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
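
/*
 * transport_lookup_cmd_lun() maps the fabric-supplied unpacked LUN to the
 * backend se_lun for a regular SCSI command.  On success it takes a percpu
 * reference on the LUN (dropped at command completion), updates the
 * per-device-entry and per-device I/O statistics, and associates the command
 * with the backing se_device.  When no MappedLUN=0 exists for the initiator,
 * the TPG's virtual LUN 0 is substituted (forced read-only) so that
 * REPORT LUNS and similar commands still work.
 */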
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
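
/*
 * transport_lookup_tmr_lun() is the task-management analogue of
 * transport_lookup_cmd_lun(): it resolves the LUN for a TMR, associates the
 * request with the backing se_device, and queues it on the device's TMR list
 * under se_tmr_lock.  Unlike the command path there is no virtual LUN 0
 * fallback; an unmapped LUN simply fails with -ENODEV.
 */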
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc_mb(&deve->pr_ref_count);
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}
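
/*
 * core_update_device_list_access() flips an existing MappedLUN entry between
 * READ_WRITE and READ_ONLY under device_list_lock.  The two flags are
 * mutually exclusive, so the old one is always cleared before the new one
 * is set.
 */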
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EEXIST;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
			       " not match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();

	spin_lock(&dev->se_port_lock);
	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
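
/*
 * Round max_sectors down to a multiple of (PAGE_SIZE / block_size) so that
 * maximum-sized I/Os stay page-aligned.  For example, with 512-byte blocks
 * and PAGE_SIZE == 4096 the alignment is 8 sectors, so a reported
 * max_sectors of 1023 is rounded down to 1016.
 */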
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
bool se_dev_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}
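
/*
 * The se_dev_set_*() helpers below back the per-device attributes that are
 * exported through configfs (typically under
 * /sys/kernel/config/target/core/$HBA/$DEV/attrib/).  Each validates its
 * input and returns 0 on success or a negative errno; attributes that would
 * change on-the-wire behavior additionally refuse to change while the device
 * is exported (export_count != 0).
 */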
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_warn("emulate_fua_write not supported for this device, ignoring\n");
		return 0;
	}
	if (dev->export_count) {
		pr_err("emulate_fua_write cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("ua read emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("emulate_write_cache cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
			dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
			dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
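
/*
 * pi_prot_type selects the T10 Protection Information (DIF) mode: 0 disables
 * protection, while 1 and 3 select TYPE1/TYPE3 emulation in the backend.
 * TYPE2 (flag == 2) is recognized but rejected below as unsupported.
 * Hardware-offloaded protection (hw_pi_prot_type) takes precedence and makes
 * this setting a no-op.
 */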
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (queue_depth > dev->dev_attrib.queue_depth) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.hw_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}
/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
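
/*
 * target_alloc_device() asks the backend to allocate its se_device subclass,
 * then initializes every embedded list, lock and default attribute, including
 * the internal xcopy_lun used as the local nexus for EXTENDED_COPY I/O.  The
 * device is not usable until target_configure_device() succeeds.
 */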
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}
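
/*
 * target_configure_device() transitions a device allocated by
 * target_alloc_device() into a configured state: the backend's
 * configure_device() is called, hardware limits are mirrored into the
 * tunable attributes, ALUA is set up, the per-device TMR workqueue is
 * created, and the device is added to the global device list before
 * DF_CONFIGURED is set.
 */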
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}
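
/*
 * Set up the global virtual LUN 0 backed by a small nullio rd_mcp ramdisk
 * on an internal-use HBA.  transport_lookup_cmd_lun() falls back to this
 * device (via each TPG's tpg_virt_lun0) so initiators without an explicit
 * MappedLUN=0 can still issue REPORT LUNS.
 */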
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;

	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
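
/*
 * Passthrough backends (e.g. pSCSI and TCMU) hand most CDBs straight to the
 * underlying device, so only minimal parsing is done here: legacy LUN bits
 * are cleared, REPORT LUNS is emulated, and the DATA_CDB flag is set for
 * opcodes that move data.
 */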
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else we pass it on.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);