/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_iod *iod =
		container_of(req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq = blk_mq_rq_from_pdu(iod);

		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
		unsigned int hctx_idx, unsigned int rq_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
			opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);

	return ret;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no connection
	 * rejections when a subsystem is assigned to a port for which loop
	 * doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one port to
	 * be added and a subsystem was assigned to a port other than
	 * nvmet_loop_port.
	 */

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */