drivers/target/loopback/tcm_loop.c
1 /*******************************************************************************
2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports
5 *
6 * © Copyright 2011-2013 Datera, Inc.
7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/types.h>
28 #include <linux/configfs.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34
35 #include <target/target_core_base.h>
36 #include <target/target_core_fabric.h>
37 #include <target/target_core_fabric_configfs.h>
38
39 #include "tcm_loop.h"
40
41 #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
42
43 static struct workqueue_struct *tcm_loop_workqueue;
44 static struct kmem_cache *tcm_loop_cmd_cache;
45
46 static int tcm_loop_hba_no_cnt;
47
48 static int tcm_loop_queue_status(struct se_cmd *se_cmd);
49
50 /*
51 * Called from struct target_core_fabric_ops->check_stop_free()
52 */
53 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
54 {
55 /*
56 * Do not release struct se_cmd's containing a valid TMR
57  * pointer. These will be released directly in tcm_loop_issue_tmr()
58 * with transport_generic_free_cmd().
59 */
60 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
61 return 0;
62 /*
63 * Release the struct se_cmd, which will make a callback to release
64  * struct tcm_loop_cmd * in tcm_loop_release_cmd()
65 */
66 transport_generic_free_cmd(se_cmd, 0);
67 return 1;
68 }
69
70 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
71 {
72 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
73 struct tcm_loop_cmd, tl_se_cmd);
74
75 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
76 }
77
78 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
79 {
80 seq_printf(m, "tcm_loop_proc_info()\n");
81 return 0;
82 }
83
84 static int tcm_loop_driver_probe(struct device *);
85 static int tcm_loop_driver_remove(struct device *);
86
87 static int pseudo_lld_bus_match(struct device *dev,
88 struct device_driver *dev_driver)
89 {
90 return 1;
91 }
92
93 static struct bus_type tcm_loop_lld_bus = {
94 .name = "tcm_loop_bus",
95 .match = pseudo_lld_bus_match,
96 .probe = tcm_loop_driver_probe,
97 .remove = tcm_loop_driver_remove,
98 };
99
100 static struct device_driver tcm_loop_driverfs = {
101 .name = "tcm_loop",
102 .bus = &tcm_loop_lld_bus,
103 };
104 /*
105 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
106 */
107 static struct device *tcm_loop_primary;
108
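/*
 * Workqueue handler for the submission deferred from tcm_loop_queuecommand():
 * resolve the TPG and I_T nexus for the incoming struct scsi_cmnd, then hand
 * the CDB plus its data, BIDI and protection scatterlists to target core via
 * target_submit_cmd_map_sgls().
 */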
109 static void tcm_loop_submission_work(struct work_struct *work)
110 {
111 struct tcm_loop_cmd *tl_cmd =
112 container_of(work, struct tcm_loop_cmd, work);
113 struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
114 struct scsi_cmnd *sc = tl_cmd->sc;
115 struct tcm_loop_nexus *tl_nexus;
116 struct tcm_loop_hba *tl_hba;
117 struct tcm_loop_tpg *tl_tpg;
118 struct scatterlist *sgl_bidi = NULL;
119 u32 sgl_bidi_count = 0, transfer_length;
120 int rc;
121
122 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
123 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
124
125 /*
126 * Ensure that this tl_tpg reference from the incoming sc->device->id
127 * has already been configured via tcm_loop_make_naa_tpg().
128 */
129 if (!tl_tpg->tl_hba) {
130 set_host_byte(sc, DID_NO_CONNECT);
131 goto out_done;
132 }
133 if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
134 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
135 goto out_done;
136 }
137 tl_nexus = tl_tpg->tl_nexus;
138 if (!tl_nexus) {
139 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
140 " does not exist\n");
141 set_host_byte(sc, DID_ERROR);
142 goto out_done;
143 }
144 if (scsi_bidi_cmnd(sc)) {
145 struct scsi_data_buffer *sdb = scsi_in(sc);
146
147 sgl_bidi = sdb->table.sgl;
148 sgl_bidi_count = sdb->table.nents;
149 se_cmd->se_cmd_flags |= SCF_BIDI;
150
151 }
152
153 transfer_length = scsi_transfer_length(sc);
154 if (!scsi_prot_sg_count(sc) &&
155 scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
156 se_cmd->prot_pto = true;
157 /*
158 * loopback transport doesn't support
159 * WRITE_GENERATE, READ_STRIP protection
160 * information operations, go ahead unprotected.
161 */
162 transfer_length = scsi_bufflen(sc);
163 }
164
165 se_cmd->tag = tl_cmd->sc_cmd_tag;
166 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
167 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
168 transfer_length, TCM_SIMPLE_TAG,
169 sc->sc_data_direction, 0,
170 scsi_sglist(sc), scsi_sg_count(sc),
171 sgl_bidi, sgl_bidi_count,
172 scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
173 if (rc < 0) {
174 set_host_byte(sc, DID_NO_CONNECT);
175 goto out_done;
176 }
177 return;
178
179 out_done:
180 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
181 sc->scsi_done(sc);
182 return;
183 }
184
185 /*
186 * ->queuecommand can be and usually is called from interrupt context, so
187 * defer the actual submission to a workqueue.
188 */
189 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
190 {
191 struct tcm_loop_cmd *tl_cmd;
192
193 pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
194 " scsi_buf_len: %u\n", sc->device->host->host_no,
195 sc->device->id, sc->device->channel, sc->device->lun,
196 sc->cmnd[0], scsi_bufflen(sc));
197
198 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
199 if (!tl_cmd) {
200 pr_err("Unable to allocate struct tcm_loop_cmd\n");
201 set_host_byte(sc, DID_ERROR);
202 sc->scsi_done(sc);
203 return 0;
204 }
205
206 tl_cmd->sc = sc;
207 tl_cmd->sc_cmd_tag = sc->request->tag;
208 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
209 queue_work(tcm_loop_workqueue, &tl_cmd->work);
210 return 0;
211 }
212
213 /*
214  * Called from SCSI EH process context to issue an ABORT_TASK or
215  * LUN_RESET TMR to struct scsi_device
216 */
217 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
218 u64 lun, int task, enum tcm_tmreq_table tmr)
219 {
220 struct se_cmd *se_cmd = NULL;
221 struct se_session *se_sess;
222 struct se_portal_group *se_tpg;
223 struct tcm_loop_nexus *tl_nexus;
224 struct tcm_loop_cmd *tl_cmd = NULL;
225 struct tcm_loop_tmr *tl_tmr = NULL;
226 int ret = TMR_FUNCTION_FAILED, rc;
227
228 /*
229 * Locate the tl_nexus and se_sess pointers
230 */
231 tl_nexus = tl_tpg->tl_nexus;
232 if (!tl_nexus) {
233 pr_err("Unable to perform device reset without"
234 " active I_T Nexus\n");
235 return ret;
236 }
237
238 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
239 if (!tl_cmd) {
240 pr_err("Unable to allocate memory for tl_cmd\n");
241 return ret;
242 }
243
244 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
245 if (!tl_tmr) {
246 pr_err("Unable to allocate memory for tl_tmr\n");
247 goto release;
248 }
249 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
250
251 se_cmd = &tl_cmd->tl_se_cmd;
252 se_tpg = &tl_tpg->tl_se_tpg;
253 se_sess = tl_tpg->tl_nexus->se_sess;
254 /*
255 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
256 */
257 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
258 DMA_NONE, TCM_SIMPLE_TAG,
259 &tl_cmd->tl_sense_buf[0]);
260
261 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
262 if (rc < 0)
263 goto release;
264
265 if (tmr == TMR_ABORT_TASK)
266 se_cmd->se_tmr_req->ref_task_tag = task;
267
268 /*
269 * Locate the underlying TCM struct se_lun
270 */
271 if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
272 ret = TMR_LUN_DOES_NOT_EXIST;
273 goto release;
274 }
275 /*
276 * Queue the TMR to TCM Core and sleep waiting for
277 * tcm_loop_queue_tm_rsp() to wake us up.
278 */
279 transport_generic_handle_tmr(se_cmd);
280 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
281 /*
282          * The TMR has completed, check the response status and
283 * then release allocations.
284 */
285 ret = se_cmd->se_tmr_req->response;
286 release:
287 if (se_cmd)
288 transport_generic_free_cmd(se_cmd, 1);
289 else
290 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
291 kfree(tl_tmr);
292 return ret;
293 }
294
295 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
296 {
297 struct tcm_loop_hba *tl_hba;
298 struct tcm_loop_tpg *tl_tpg;
299 int ret = FAILED;
300
301 /*
302          * Locate the struct tcm_loop_hba pointer
303 */
304 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
305 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
306 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
307 sc->request->tag, TMR_ABORT_TASK);
308 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
309 }
310
311 /*
312 * Called from SCSI EH process context to issue a LUN_RESET TMR
313 * to struct scsi_device
314 */
315 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
316 {
317 struct tcm_loop_hba *tl_hba;
318 struct tcm_loop_tpg *tl_tpg;
319 int ret = FAILED;
320
321 /*
322          * Locate the struct tcm_loop_hba pointer
323 */
324 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
325 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
326
327 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
328 0, TMR_LUN_RESET);
329 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
330 }
331
332 static int tcm_loop_target_reset(struct scsi_cmnd *sc)
333 {
334 struct tcm_loop_hba *tl_hba;
335 struct tcm_loop_tpg *tl_tpg;
336
337 /*
338          * Locate the struct tcm_loop_hba pointer
339 */
340 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
341 if (!tl_hba) {
342 pr_err("Unable to perform device reset without"
343 " active I_T Nexus\n");
344 return FAILED;
345 }
346 /*
347 * Locate the tl_tpg pointer from TargetID in sc->device->id
348 */
349 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
350         if (tl_tpg->tl_hba) {
351 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
352 return SUCCESS;
353 }
354 return FAILED;
355 }
356
357 static int tcm_loop_slave_alloc(struct scsi_device *sd)
358 {
359 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
360 return 0;
361 }
362
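/*
 * SCSI host template for the emulated loopback LLD.  SCSI EH callbacks are
 * translated into TCM task management requests (TMR_ABORT_TASK and
 * TMR_LUN_RESET) through tcm_loop_issue_tmr() above.
 */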
363 static struct scsi_host_template tcm_loop_driver_template = {
364 .show_info = tcm_loop_show_info,
365 .proc_name = "tcm_loopback",
366 .name = "TCM_Loopback",
367 .queuecommand = tcm_loop_queuecommand,
368 .change_queue_depth = scsi_change_queue_depth,
369 .eh_abort_handler = tcm_loop_abort_task,
370 .eh_device_reset_handler = tcm_loop_device_reset,
371 .eh_target_reset_handler = tcm_loop_target_reset,
372 .can_queue = 1024,
373 .this_id = -1,
374 .sg_tablesize = 256,
375 .cmd_per_lun = 1024,
376 .max_sectors = 0xFFFF,
377 .use_clustering = DISABLE_CLUSTERING,
378 .slave_alloc = tcm_loop_slave_alloc,
379 .module = THIS_MODULE,
380 .track_queue_depth = 1,
381 };
382
383 static int tcm_loop_driver_probe(struct device *dev)
384 {
385 struct tcm_loop_hba *tl_hba;
386 struct Scsi_Host *sh;
387 int error, host_prot;
388
389 tl_hba = to_tcm_loop_hba(dev);
390
391 sh = scsi_host_alloc(&tcm_loop_driver_template,
392 sizeof(struct tcm_loop_hba));
393 if (!sh) {
394 pr_err("Unable to allocate struct scsi_host\n");
395 return -ENODEV;
396 }
397 tl_hba->sh = sh;
398
399 /*
400 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
401 */
402 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
403 /*
404 * Setup single ID, Channel and LUN for now..
405 */
406 sh->max_id = 2;
407 sh->max_lun = 0;
408 sh->max_channel = 0;
409 sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
410
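	/*
	 * Advertise support for all T10-PI DIF/DIX protection types on the
	 * emulated host, with CRC as the DIX guard tag format.
	 */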
411 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
412 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
413 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
414
415 scsi_host_set_prot(sh, host_prot);
416 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
417
418 error = scsi_add_host(sh, &tl_hba->dev);
419 if (error) {
420 pr_err("%s: scsi_add_host failed\n", __func__);
421 scsi_host_put(sh);
422 return -ENODEV;
423 }
424 return 0;
425 }
426
427 static int tcm_loop_driver_remove(struct device *dev)
428 {
429 struct tcm_loop_hba *tl_hba;
430 struct Scsi_Host *sh;
431
432 tl_hba = to_tcm_loop_hba(dev);
433 sh = tl_hba->sh;
434
435 scsi_remove_host(sh);
436 scsi_host_put(sh);
437 return 0;
438 }
439
440 static void tcm_loop_release_adapter(struct device *dev)
441 {
442 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
443
444 kfree(tl_hba);
445 }
446
447 /*
448  * Called from tcm_loop_make_scsi_hba() below
449 */
450 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
451 {
452 int ret;
453
454 tl_hba->dev.bus = &tcm_loop_lld_bus;
455 tl_hba->dev.parent = tcm_loop_primary;
456 tl_hba->dev.release = &tcm_loop_release_adapter;
457 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
458
459 ret = device_register(&tl_hba->dev);
460 if (ret) {
461 pr_err("device_register() failed for"
462 " tl_hba->dev: %d\n", ret);
463 return -ENODEV;
464 }
465
466 return 0;
467 }
468
469 /*
470  * Called from tcm_loop_fabric_init() below to load the emulated
471 * tcm_loop SCSI bus.
472 */
473 static int tcm_loop_alloc_core_bus(void)
474 {
475 int ret;
476
477 tcm_loop_primary = root_device_register("tcm_loop_0");
478 if (IS_ERR(tcm_loop_primary)) {
479 pr_err("Unable to allocate tcm_loop_primary\n");
480 return PTR_ERR(tcm_loop_primary);
481 }
482
483 ret = bus_register(&tcm_loop_lld_bus);
484 if (ret) {
485 pr_err("bus_register() failed for tcm_loop_lld_bus\n");
486 goto dev_unreg;
487 }
488
489 ret = driver_register(&tcm_loop_driverfs);
490 if (ret) {
491 pr_err("driver_register() failed for"
492 "tcm_loop_driverfs\n");
493 goto bus_unreg;
494 }
495
496 pr_debug("Initialized TCM Loop Core Bus\n");
497 return ret;
498
499 bus_unreg:
500 bus_unregister(&tcm_loop_lld_bus);
501 dev_unreg:
502 root_device_unregister(tcm_loop_primary);
503 return ret;
504 }
505
506 static void tcm_loop_release_core_bus(void)
507 {
508 driver_unregister(&tcm_loop_driverfs);
509 bus_unregister(&tcm_loop_lld_bus);
510 root_device_unregister(tcm_loop_primary);
511
512 pr_debug("Releasing TCM Loop Core BUS\n");
513 }
514
515 static char *tcm_loop_get_fabric_name(void)
516 {
517 return "loopback";
518 }
519
520 static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
521 {
522 return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
523 }
524
525 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
526 {
527 /*
528 * Return the passed NAA identifier for the Target Port
529 */
530 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
531 }
532
533 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
534 {
535 /*
536 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
537 * to represent the SCSI Target Port.
538 */
539 return tl_tpg(se_tpg)->tl_tpgt;
540 }
541
542 /*
543 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
544 * based upon the incoming fabric dependent SCSI Initiator Port
545 */
546 static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
547 {
548 return 1;
549 }
550
551 static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
552 {
553 return 0;
554 }
555
556 /*
557  * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
558 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
559 */
560 static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
561 {
562 return 0;
563 }
564
565 /*
566  * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
567 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
568 * It has been added here as a nop for target_fabric_tf_ops_check()
569 */
570 static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
571 {
572 return 0;
573 }
574
575 static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
576 {
577 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
578 tl_se_tpg);
579 return tl_tpg->tl_fabric_prot_type;
580 }
581
582 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
583 {
584 return 1;
585 }
586
587 static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
588 {
589 return 1;
590 }
591
592 static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
593 {
594 return;
595 }
596
597 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
598 {
599 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
600 struct tcm_loop_cmd, tl_se_cmd);
601
602 return tl_cmd->sc_cmd_state;
603 }
604
605 static int tcm_loop_shutdown_session(struct se_session *se_sess)
606 {
607 return 0;
608 }
609
610 static void tcm_loop_close_session(struct se_session *se_sess)
611 {
612 return;
613 }
614
615 static int tcm_loop_write_pending(struct se_cmd *se_cmd)
616 {
617 /*
618 * Since Linux/SCSI has already sent down a struct scsi_cmnd
619 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
620          * memory, and memory has already been mapped to struct se_cmd->t_data_sg
621          * by target_submit_cmd_map_sgls().
622 *
623 * We now tell TCM to add this WRITE CDB directly into the TCM storage
624 * object execution queue.
625 */
626 target_execute_cmd(se_cmd);
627 return 0;
628 }
629
630 static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
631 {
632 return 0;
633 }
634
635 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
636 {
637 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
638 struct tcm_loop_cmd, tl_se_cmd);
639 struct scsi_cmnd *sc = tl_cmd->sc;
640
641 pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
642 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
643
644 sc->result = SAM_STAT_GOOD;
645 set_host_byte(sc, DID_OK);
646 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
647 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
648 scsi_set_resid(sc, se_cmd->residual_count);
649 sc->scsi_done(sc);
650 return 0;
651 }
652
653 static int tcm_loop_queue_status(struct se_cmd *se_cmd)
654 {
655 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
656 struct tcm_loop_cmd, tl_se_cmd);
657 struct scsi_cmnd *sc = tl_cmd->sc;
658
659 pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
660 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
661
662 if (se_cmd->sense_buffer &&
663 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
664 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
665
666 memcpy(sc->sense_buffer, se_cmd->sense_buffer,
667 SCSI_SENSE_BUFFERSIZE);
668 sc->result = SAM_STAT_CHECK_CONDITION;
669 set_driver_byte(sc, DRIVER_SENSE);
670 } else
671 sc->result = se_cmd->scsi_status;
672
673 set_host_byte(sc, DID_OK);
674 if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
675 (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
676 scsi_set_resid(sc, se_cmd->residual_count);
677 sc->scsi_done(sc);
678 return 0;
679 }
680
681 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
682 {
683 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
684 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
685 /*
686          * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
687          * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
688 */
689 atomic_set(&tl_tmr->tmr_complete, 1);
690 wake_up(&tl_tmr->tl_tmr_wait);
691 }
692
693 static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
694 {
695 return;
696 }
697
698 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
699 {
700 switch (tl_hba->tl_proto_id) {
701 case SCSI_PROTOCOL_SAS:
702 return "SAS";
703 case SCSI_PROTOCOL_FCP:
704 return "FCP";
705 case SCSI_PROTOCOL_ISCSI:
706 return "iSCSI";
707 default:
708 break;
709 }
710
711 return "Unknown";
712 }
713
714 /* Start items for tcm_loop_port_cit */
715
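/*
 * fabric_post_link callback: when a LUN is linked below this TPG in configfs,
 * register a matching struct scsi_device on the emulated host at
 * channel 0 / target id == tpgt / lun == unpacked_lun.
 */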
716 static int tcm_loop_port_link(
717 struct se_portal_group *se_tpg,
718 struct se_lun *lun)
719 {
720 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
721 struct tcm_loop_tpg, tl_se_tpg);
722 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
723
724 atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
725 /*
726 * Add Linux/SCSI struct scsi_device by HCTL
727 */
728 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
729
730 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
731 return 0;
732 }
733
734 static void tcm_loop_port_unlink(
735 struct se_portal_group *se_tpg,
736 struct se_lun *se_lun)
737 {
738 struct scsi_device *sd;
739 struct tcm_loop_hba *tl_hba;
740 struct tcm_loop_tpg *tl_tpg;
741
742 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
743 tl_hba = tl_tpg->tl_hba;
744
745 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
746 se_lun->unpacked_lun);
747 if (!sd) {
748 pr_err("Unable to locate struct scsi_device for %d:%d:"
749 "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
750 return;
751 }
752 /*
753 * Remove Linux/SCSI struct scsi_device by HCTL
754 */
755 scsi_remove_device(sd);
756 scsi_device_put(sd);
757
758 atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
759
760 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
761 }
762
763 /* End items for tcm_loop_port_cit */
764
765 static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
766 struct se_portal_group *se_tpg,
767 char *page)
768 {
769 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
770 tl_se_tpg);
771
772 return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
773 }
774
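/*
 * Accepted fabric_prot_type values are 0, 1 and 3, following the T10-PI
 * DIF type numbering used by target core (0 disables fabric-only protection).
 */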
775 static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
776 struct se_portal_group *se_tpg,
777 const char *page,
778 size_t count)
779 {
780 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
781 tl_se_tpg);
782 unsigned long val;
783 int ret = kstrtoul(page, 0, &val);
784
785 if (ret) {
786 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
787 return ret;
788 }
789 if (val != 0 && val != 1 && val != 3) {
790 pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
791 return -EINVAL;
792 }
793 tl_tpg->tl_fabric_prot_type = val;
794
795 return count;
796 }
797
798 TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
799
800 static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
801 &tcm_loop_tpg_attrib_fabric_prot_type.attr,
802 NULL,
803 };
804
805 /* Start items for tcm_loop_nexus_cit */
806
807 static int tcm_loop_make_nexus(
808 struct tcm_loop_tpg *tl_tpg,
809 const char *name)
810 {
811 struct se_portal_group *se_tpg;
812 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
813 struct tcm_loop_nexus *tl_nexus;
814 int ret = -ENOMEM;
815
816 if (tl_tpg->tl_nexus) {
817 pr_debug("tl_tpg->tl_nexus already exists\n");
818 return -EEXIST;
819 }
820 se_tpg = &tl_tpg->tl_se_tpg;
821
822 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
823 if (!tl_nexus) {
824 pr_err("Unable to allocate struct tcm_loop_nexus\n");
825 return -ENOMEM;
826 }
827 /*
828 * Initialize the struct se_session pointer
829 */
830 tl_nexus->se_sess = transport_init_session(
831 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
832 if (IS_ERR(tl_nexus->se_sess)) {
833 ret = PTR_ERR(tl_nexus->se_sess);
834 goto out;
835 }
836 /*
837          * Since we are running in 'demo mode' this call will generate a
838 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
839 * Initiator port name of the passed configfs group 'name'.
840 */
841 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
842 se_tpg, (unsigned char *)name);
843 if (!tl_nexus->se_sess->se_node_acl) {
844 transport_free_session(tl_nexus->se_sess);
845 goto out;
846 }
847 /* Now, register the I_T Nexus as active. */
848 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
849 tl_nexus->se_sess, tl_nexus);
850 tl_tpg->tl_nexus = tl_nexus;
851 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
852 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
853 name);
854 return 0;
855
856 out:
857 kfree(tl_nexus);
858 return ret;
859 }
860
861 static int tcm_loop_drop_nexus(
862 struct tcm_loop_tpg *tpg)
863 {
864 struct se_session *se_sess;
865 struct tcm_loop_nexus *tl_nexus;
866
867 tl_nexus = tpg->tl_nexus;
868 if (!tl_nexus)
869 return -ENODEV;
870
871 se_sess = tl_nexus->se_sess;
872 if (!se_sess)
873 return -ENODEV;
874
875 if (atomic_read(&tpg->tl_tpg_port_count)) {
876 pr_err("Unable to remove TCM_Loop I_T Nexus with"
877 " active TPG port count: %d\n",
878 atomic_read(&tpg->tl_tpg_port_count));
879 return -EPERM;
880 }
881
882 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
883 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
884 tl_nexus->se_sess->se_node_acl->initiatorname);
885 /*
886 * Release the SCSI I_T Nexus to the emulated Target Port
887 */
888 transport_deregister_session(tl_nexus->se_sess);
889 tpg->tl_nexus = NULL;
890 kfree(tl_nexus);
891 return 0;
892 }
893
894 /* End items for tcm_loop_nexus_cit */
895
896 static ssize_t tcm_loop_tpg_show_nexus(
897 struct se_portal_group *se_tpg,
898 char *page)
899 {
900 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
901 struct tcm_loop_tpg, tl_se_tpg);
902 struct tcm_loop_nexus *tl_nexus;
903 ssize_t ret;
904
905 tl_nexus = tl_tpg->tl_nexus;
906 if (!tl_nexus)
907 return -ENODEV;
908
909 ret = snprintf(page, PAGE_SIZE, "%s\n",
910 tl_nexus->se_sess->se_node_acl->initiatorname);
911
912 return ret;
913 }
914
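/*
 * Example usage via configfs (WWNs below are illustrative only):
 *
 *   echo naa.60014052cc8a1a94 > \
 *     /sys/kernel/config/target/loopback/naa.60014051c2ed37c9/tpgt_0/nexus
 *
 * establishes the I_T nexus for an emulated SAS initiator port, while
 * writing "NULL" drops an existing nexus again.
 */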
915 static ssize_t tcm_loop_tpg_store_nexus(
916 struct se_portal_group *se_tpg,
917 const char *page,
918 size_t count)
919 {
920 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
921 struct tcm_loop_tpg, tl_se_tpg);
922 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
923 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
924 int ret;
925 /*
926 * Shutdown the active I_T nexus if 'NULL' is passed..
927 */
928 if (!strncmp(page, "NULL", 4)) {
929 ret = tcm_loop_drop_nexus(tl_tpg);
930 return (!ret) ? count : ret;
931 }
932 /*
933 * Otherwise make sure the passed virtual Initiator port WWN matches
934 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
935 * tcm_loop_make_nexus()
936 */
937 if (strlen(page) >= TL_WWN_ADDR_LEN) {
938 pr_err("Emulated NAA Sas Address: %s, exceeds"
939 " max: %d\n", page, TL_WWN_ADDR_LEN);
940 return -EINVAL;
941 }
942 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
943
944 ptr = strstr(i_port, "naa.");
945 if (ptr) {
946 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
947 pr_err("Passed SAS Initiator Port %s does not"
948 " match target port protoid: %s\n", i_port,
949 tcm_loop_dump_proto_id(tl_hba));
950 return -EINVAL;
951 }
952 port_ptr = &i_port[0];
953 goto check_newline;
954 }
955 ptr = strstr(i_port, "fc.");
956 if (ptr) {
957 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
958 pr_err("Passed FCP Initiator Port %s does not"
959 " match target port protoid: %s\n", i_port,
960 tcm_loop_dump_proto_id(tl_hba));
961 return -EINVAL;
962 }
963 port_ptr = &i_port[3]; /* Skip over "fc." */
964 goto check_newline;
965 }
966 ptr = strstr(i_port, "iqn.");
967 if (ptr) {
968 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
969 pr_err("Passed iSCSI Initiator Port %s does not"
970 " match target port protoid: %s\n", i_port,
971 tcm_loop_dump_proto_id(tl_hba));
972 return -EINVAL;
973 }
974 port_ptr = &i_port[0];
975 goto check_newline;
976 }
977 pr_err("Unable to locate prefix for emulated Initiator Port:"
978 " %s\n", i_port);
979 return -EINVAL;
980 /*
981 * Clear any trailing newline for the NAA WWN
982 */
983 check_newline:
984 if (i_port[strlen(i_port)-1] == '\n')
985 i_port[strlen(i_port)-1] = '\0';
986
987 ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
988 if (ret < 0)
989 return ret;
990
991 return count;
992 }
993
994 TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
995
996 static ssize_t tcm_loop_tpg_show_transport_status(
997 struct se_portal_group *se_tpg,
998 char *page)
999 {
1000 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1001 struct tcm_loop_tpg, tl_se_tpg);
1002 const char *status = NULL;
1003 ssize_t ret = -EINVAL;
1004
1005 switch (tl_tpg->tl_transport_status) {
1006 case TCM_TRANSPORT_ONLINE:
1007 status = "online";
1008 break;
1009 case TCM_TRANSPORT_OFFLINE:
1010 status = "offline";
1011 break;
1012 default:
1013 break;
1014 }
1015
1016 if (status)
1017 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1018
1019 return ret;
1020 }
1021
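/*
 * Example: echo offline > .../loopback/<wwn>/tpgt_0/transport_status makes
 * new commands fail with DID_TRANSPORT_DISRUPTED and posts a nexus loss
 * Unit Attention; writing "online" restores normal operation.
 */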
1022 static ssize_t tcm_loop_tpg_store_transport_status(
1023 struct se_portal_group *se_tpg,
1024 const char *page,
1025 size_t count)
1026 {
1027 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1028 struct tcm_loop_tpg, tl_se_tpg);
1029
1030 if (!strncmp(page, "online", 6)) {
1031 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1032 return count;
1033 }
1034 if (!strncmp(page, "offline", 7)) {
1035 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1036 if (tl_tpg->tl_nexus) {
1037 struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1038
1039 core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1040 }
1041 return count;
1042 }
1043 return -EINVAL;
1044 }
1045
1046 TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
1047
1048 static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1049 &tcm_loop_tpg_nexus.attr,
1050 &tcm_loop_tpg_transport_status.attr,
1051 NULL,
1052 };
1053
1054 /* Start items for tcm_loop_naa_cit */
1055
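/*
 * Example: mkdir /sys/kernel/config/target/loopback/<wwn>/tpgt_0 creates
 * TPG 0 under an existing loopback WWN; the directory name must be
 * "tpgt_" followed by a number smaller than TL_TPGS_PER_HBA.
 */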
1056 static struct se_portal_group *tcm_loop_make_naa_tpg(
1057 struct se_wwn *wwn,
1058 struct config_group *group,
1059 const char *name)
1060 {
1061 struct tcm_loop_hba *tl_hba = container_of(wwn,
1062 struct tcm_loop_hba, tl_hba_wwn);
1063 struct tcm_loop_tpg *tl_tpg;
1064 int ret;
1065 unsigned long tpgt;
1066
1067 if (strstr(name, "tpgt_") != name) {
1068 pr_err("Unable to locate \"tpgt_#\" directory"
1069 " group\n");
1070 return ERR_PTR(-EINVAL);
1071 }
1072 if (kstrtoul(name+5, 10, &tpgt))
1073 return ERR_PTR(-EINVAL);
1074
1075 if (tpgt >= TL_TPGS_PER_HBA) {
1076 pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1077 " %u\n", tpgt, TL_TPGS_PER_HBA);
1078 return ERR_PTR(-EINVAL);
1079 }
1080 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1081 tl_tpg->tl_hba = tl_hba;
1082 tl_tpg->tl_tpgt = tpgt;
1083 /*
1084          * Register the tl_tpg as an emulated TCM Target Endpoint
1085 */
1086 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1087 if (ret < 0)
1088 return ERR_PTR(-ENOMEM);
1089
1090 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1091 " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1092 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1093
1094 return &tl_tpg->tl_se_tpg;
1095 }
1096
1097 static void tcm_loop_drop_naa_tpg(
1098 struct se_portal_group *se_tpg)
1099 {
1100 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1101 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1102 struct tcm_loop_tpg, tl_se_tpg);
1103 struct tcm_loop_hba *tl_hba;
1104 unsigned short tpgt;
1105
1106 tl_hba = tl_tpg->tl_hba;
1107 tpgt = tl_tpg->tl_tpgt;
1108 /*
1109 * Release the I_T Nexus for the Virtual target link if present
1110 */
1111 tcm_loop_drop_nexus(tl_tpg);
1112 /*
1113          * Deregister the tl_tpg as an emulated TCM Target Endpoint
1114 */
1115 core_tpg_deregister(se_tpg);
1116
1117 tl_tpg->tl_hba = NULL;
1118 tl_tpg->tl_tpgt = 0;
1119
1120 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1121 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1122 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1123 }
1124
1125 /* End items for tcm_loop_naa_cit */
1126
1127 /* Start items for tcm_loop_cit */
1128
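/*
 * Example: mkdir /sys/kernel/config/target/loopback/naa.60014051c2ed37c9
 * (an fc. or iqn. prefixed name selects FCP or iSCSI emulation instead)
 * allocates a tcm_loop_hba and registers the emulated struct Scsi_Host
 * through tcm_loop_setup_hba_bus().  The WWN shown is illustrative only.
 */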
1129 static struct se_wwn *tcm_loop_make_scsi_hba(
1130 struct target_fabric_configfs *tf,
1131 struct config_group *group,
1132 const char *name)
1133 {
1134 struct tcm_loop_hba *tl_hba;
1135 struct Scsi_Host *sh;
1136 char *ptr;
1137 int ret, off = 0;
1138
1139 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1140 if (!tl_hba) {
1141 pr_err("Unable to allocate struct tcm_loop_hba\n");
1142 return ERR_PTR(-ENOMEM);
1143 }
1144 /*
1145 * Determine the emulated Protocol Identifier and Target Port Name
1146 * based on the incoming configfs directory name.
1147 */
1148 ptr = strstr(name, "naa.");
1149 if (ptr) {
1150 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1151 goto check_len;
1152 }
1153 ptr = strstr(name, "fc.");
1154 if (ptr) {
1155 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1156 off = 3; /* Skip over "fc." */
1157 goto check_len;
1158 }
1159 ptr = strstr(name, "iqn.");
1160 if (!ptr) {
1161 pr_err("Unable to locate prefix for emulated Target "
1162 "Port: %s\n", name);
1163 ret = -EINVAL;
1164 goto out;
1165 }
1166 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1167
1168 check_len:
1169 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1170 pr_err("Emulated NAA %s Address: %s, exceeds"
1171 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1172 TL_WWN_ADDR_LEN);
1173 ret = -EINVAL;
1174 goto out;
1175 }
1176 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1177
1178 /*
1179          * Register tl_hba->dev on the tcm_loop bus; the device_register()
1180          * probe callback tcm_loop_driver_probe() allocates and adds the
1181          * emulated struct Scsi_Host at tl_hba->sh.
1182 */
1183 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1184 if (ret)
1185 goto out;
1186
1187 sh = tl_hba->sh;
1188 tcm_loop_hba_no_cnt++;
1189 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1190 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1191 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1192
1193 return &tl_hba->tl_hba_wwn;
1194 out:
1195 kfree(tl_hba);
1196 return ERR_PTR(ret);
1197 }
1198
1199 static void tcm_loop_drop_scsi_hba(
1200 struct se_wwn *wwn)
1201 {
1202 struct tcm_loop_hba *tl_hba = container_of(wwn,
1203 struct tcm_loop_hba, tl_hba_wwn);
1204
1205 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1206 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1207 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1208 tl_hba->sh->host_no);
1209 /*
1210 * Call device_unregister() on the original tl_hba->dev.
1211          * tcm_loop_release_adapter() will
1212          * release *tl_hba.
1213 */
1214 device_unregister(&tl_hba->dev);
1215 }
1216
1217 /* Start items for tcm_loop_cit */
1218 static ssize_t tcm_loop_wwn_show_attr_version(
1219 struct target_fabric_configfs *tf,
1220 char *page)
1221 {
1222 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1223 }
1224
1225 TF_WWN_ATTR_RO(tcm_loop, version);
1226
1227 static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1228 &tcm_loop_wwn_version.attr,
1229 NULL,
1230 };
1231
1232 /* End items for tcm_loop_cit */
1233
1234 static const struct target_core_fabric_ops loop_ops = {
1235 .module = THIS_MODULE,
1236 .name = "loopback",
1237 .get_fabric_name = tcm_loop_get_fabric_name,
1238 .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
1239 .tpg_get_tag = tcm_loop_get_tag,
1240 .tpg_check_demo_mode = tcm_loop_check_demo_mode,
1241 .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
1242 .tpg_check_demo_mode_write_protect =
1243 tcm_loop_check_demo_mode_write_protect,
1244 .tpg_check_prod_mode_write_protect =
1245 tcm_loop_check_prod_mode_write_protect,
1246 .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
1247 .tpg_get_inst_index = tcm_loop_get_inst_index,
1248 .check_stop_free = tcm_loop_check_stop_free,
1249 .release_cmd = tcm_loop_release_cmd,
1250 .shutdown_session = tcm_loop_shutdown_session,
1251 .close_session = tcm_loop_close_session,
1252 .sess_get_index = tcm_loop_sess_get_index,
1253 .write_pending = tcm_loop_write_pending,
1254 .write_pending_status = tcm_loop_write_pending_status,
1255 .set_default_node_attributes = tcm_loop_set_default_node_attributes,
1256 .get_cmd_state = tcm_loop_get_cmd_state,
1257 .queue_data_in = tcm_loop_queue_data_in,
1258 .queue_status = tcm_loop_queue_status,
1259 .queue_tm_rsp = tcm_loop_queue_tm_rsp,
1260 .aborted_task = tcm_loop_aborted_task,
1261 .fabric_make_wwn = tcm_loop_make_scsi_hba,
1262 .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
1263 .fabric_make_tpg = tcm_loop_make_naa_tpg,
1264 .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
1265 .fabric_post_link = tcm_loop_port_link,
1266 .fabric_pre_unlink = tcm_loop_port_unlink,
1267 .tfc_wwn_attrs = tcm_loop_wwn_attrs,
1268 .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
1269 .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
1270 };
1271
1272 static int __init tcm_loop_fabric_init(void)
1273 {
1274 int ret = -ENOMEM;
1275
1276 tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1277 if (!tcm_loop_workqueue)
1278 goto out;
1279
1280 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1281 sizeof(struct tcm_loop_cmd),
1282 __alignof__(struct tcm_loop_cmd),
1283 0, NULL);
1284 if (!tcm_loop_cmd_cache) {
1285 pr_debug("kmem_cache_create() for"
1286 " tcm_loop_cmd_cache failed\n");
1287 goto out_destroy_workqueue;
1288 }
1289
1290 ret = tcm_loop_alloc_core_bus();
1291 if (ret)
1292 goto out_destroy_cache;
1293
1294 ret = target_register_template(&loop_ops);
1295 if (ret)
1296 goto out_release_core_bus;
1297
1298 return 0;
1299
1300 out_release_core_bus:
1301 tcm_loop_release_core_bus();
1302 out_destroy_cache:
1303 kmem_cache_destroy(tcm_loop_cmd_cache);
1304 out_destroy_workqueue:
1305 destroy_workqueue(tcm_loop_workqueue);
1306 out:
1307 return ret;
1308 }
1309
1310 static void __exit tcm_loop_fabric_exit(void)
1311 {
1312 target_unregister_template(&loop_ops);
1313 tcm_loop_release_core_bus();
1314 kmem_cache_destroy(tcm_loop_cmd_cache);
1315 destroy_workqueue(tcm_loop_workqueue);
1316 }
1317
1318 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1319 MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1320 MODULE_LICENSE("GPL");
1321 module_init(tcm_loop_fabric_init);
1322 module_exit(tcm_loop_fabric_exit);