/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
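
/*
 * Illustrative usage (a sketch, not code from this file): a fabric module
 * would typically map the front-end LUN right after initializing a struct
 * se_cmd, turning a non-zero sense_reason_t into a CHECK_CONDITION, e.g.:
 *
 *	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc)
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *
 * The surrounding setup and error handling are fabric specific.
 */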

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
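
/*
 * Note the differing return conventions: transport_lookup_cmd_lun() returns
 * a sense_reason_t for SCSI status emulation, while this TMR variant returns
 * a plain negative errno, as task management responses carry no sense data.
 */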

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
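
/*
 * Callers that receive a non-NULL deve from the lookup above hold a
 * pr_ref_count reference and must drop it when done;
 * core_disable_device_list_for_node() below busy-waits for this count
 * to reach zero before tearing the entry down.
 */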

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was set up in demo mode.
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list.  This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in-process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
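
/*
 * Note that core_alloc_port() only reserves the RTPI; the returned port is
 * not yet linked into dev->dev_sep_list.  core_export_port() below completes
 * the export by attaching the port to its TPG/LUN and adding it to the list.
 */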

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
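
/*
 * For example (illustrative numbers): with PAGE_SIZE == 4096 and a 512 byte
 * block_size, alignment is 8 sectors, so a max_sectors of 1023 is rounded
 * down to 1016; with a 4096 byte block_size the alignment is 1 and the
 * value passes through unchanged.
 */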

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
		dev, dev->dev_attrib.max_write_same_len);
	return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for"
			" INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_dpo not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_fua_read not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (dev->transport->get_write_cache) {
		pr_warn("emulate_write_cache cannot be changed when underlying"
			" HW reports WriteCacheEnabled, ignoring request\n");
		return 0;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic block layer
	 * discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic block layer
	 * discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		dev, flag);

	return 0;
}
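
/*
 * When emulate_caw is enabled, COMPARE_AND_WRITE exclusion across the
 * read-compare-write sequence is provided per device by dev->caw_sem,
 * which target_alloc_device() below initializes to 1.
 */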

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use the Linux/SCSI layer to change"
			" the block_size of the underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
		TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	return dev;
}
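
/*
 * The DA_* values above are the compile-time attribute defaults from
 * target_core_base.h; each can later be overridden per device through the
 * matching configfs attribute, which ends up in the se_dev_set_*() setters
 * earlier in this file.
 */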

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
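
/*
 * Taken together, the expected device lifecycle (a sketch, mirroring what
 * core_dev_setup_virtual_lun0() below does) is:
 *
 *	dev = target_alloc_device(hba, name);
 *	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
 *	ret = target_configure_device(dev);
 *	...
 *	target_free_device(dev);
 */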

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}