/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"
#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
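/*
 * With the values above, blk-mq sees an admin queue depth of 255, and
 * tag 255 (== NVME_LOOP_AQ_BLKMQ_DEPTH) is never handed out by blk-mq;
 * nvme_loop_queue_response() relies on this to tell AEN completions
 * apart from regular admin completions.
 */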
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};
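/*
 * A single nvme_loop_iod carries both sides of a loopback command: the
 * host-facing nvme_request/command/completion and the target-facing
 * nvmet_req.  Crossing the "fabric" is therefore just pointer wiring
 * inside one allocation, with no data copies.
 */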
struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};
static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;
static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}
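/*
 * All queues of a controller live in one array: index 0 is the admin
 * queue and indices 1..n are the I/O queues, so the pointer arithmetic
 * above yields the queue index directly.
 */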
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
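/*
 * nvme_loop_tagset() above picks the right tag set: the admin queue has
 * its own, and the I/O tag set is indexed from zero, hence the "- 1"
 * when translating a 1-based I/O queue index.
 */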
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}
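/*
 * A timed-out command kicks reset_work to recover the controller and is
 * completed immediately with ABORT + DNR (do not retry); there is no
 * wire on which anything could be aborted.
 */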
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}
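/*
 * Submission path in short: the host command is built directly into the
 * iod, the request's pages are handed to the target as a scatterlist,
 * and execution is punted to a work item so the nvmet handler runs in
 * process context instead of the block layer's submission context.
 */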
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}
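/*
 * The AEN command reuses the tag value NVME_LOOP_AQ_BLKMQ_DEPTH that
 * blk-mq never hands out, which is exactly what the special case in
 * nvme_loop_queue_response() keys off.
 */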
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
			hctx_idx + 1);
}
static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
}
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}
static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};
static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}
static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}
static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
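/*
 * Note the CAP-based clamp above: sqsize ends up as the minimum of
 * CAP.MQES and the value derived from opts->queue_size (both
 * zero-based), so the host never builds a deeper queue than the
 * virtual controller advertises.
 */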
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}
static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}
static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}
static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}
static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}
static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}
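/*
 * The single recorded port is what nvme_loop_queue_rq() stamps into
 * every request as iod->req.port, tying host-side traffic to the one
 * loop port the target knows about.
 */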
static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}
static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};
static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}
static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
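/*
 * Rough usage sketch (names like "testnqn" and the backing device are
 * illustrative, not taken from this file): expose a namespace through
 * the loop transport via nvmet configfs, then connect with nvme-cli:
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *	echo -n /dev/nullb0 > \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		/sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *	nvme connect -t loop -n testnqn
 */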