/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
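
/*
 * Usage sketch (informational, not code from this file): configuration
 * updates that can change the discovery log take the semaphore for writing,
 * while readers such as the discovery log page handler take it for reading:
 *
 *      down_write(&nvmet_config_sem);
 *      ...add/remove subsystems, hosts or ports...
 *      up_write(&nvmet_config_sem);
 *
 *      down_read(&nvmet_config_sem);
 *      ...walk port->subsystems to build the discovery log page...
 *      up_read(&nvmet_config_sem);
 */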
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
                size_t len)
{
        if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
        if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        return 0;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
        return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
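
/*
 * Worked example: the namespace change notices queued by nvmet_ns_enable()
 * and nvmet_ns_disable() below use event_type NVME_AER_TYPE_NOTICE (0x2 per
 * the NVMe spec) with event_info 0 and log_page 0, so the result computed
 * above is 0x2 | (0 << 8) | (0 << 16) = 0x00000002.
 */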
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
        struct nvmet_req *req;

        while (1) {
                mutex_lock(&ctrl->lock);
                if (!ctrl->nr_async_event_cmds) {
                        mutex_unlock(&ctrl->lock);
                        return;
                }

                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
        }
}
static void nvmet_async_event_work(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                container_of(work, struct nvmet_ctrl, async_event_work);
        struct nvmet_async_event *aen;
        struct nvmet_req *req;

        while (1) {
                mutex_lock(&ctrl->lock);
                aen = list_first_entry_or_null(&ctrl->async_events,
                                struct nvmet_async_event, entry);
                if (!aen || !ctrl->nr_async_event_cmds) {
                        mutex_unlock(&ctrl->lock);
                        return;
                }

                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                nvmet_set_result(req, nvmet_async_event_result(aen));

                list_del(&aen->entry);
                kfree(aen);

                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, 0);
        }
}
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                u8 event_info, u8 log_page)
{
        struct nvmet_async_event *aen;

        aen = kmalloc(sizeof(*aen), GFP_KERNEL);
        if (!aen)
                return;

        aen->event_type = event_type;
        aen->event_info = event_info;
        aen->log_page = log_page;

        mutex_lock(&ctrl->lock);
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
        int ret = 0;

        down_write(&nvmet_config_sem);
        if (nvmet_transports[ops->type])
                ret = -EINVAL;
        else
                nvmet_transports[ops->type] = ops;
        up_write(&nvmet_config_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
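
/*
 * Registration sketch (hypothetical transport, not part of this file): a
 * fabrics driver fills in a struct nvmet_fabrics_ops and registers it for
 * its transport type from module init, e.g.:
 *
 *      static struct nvmet_fabrics_ops nvmet_foo_ops = {
 *              .owner          = THIS_MODULE,
 *              .type           = NVMF_TRTYPE_RDMA,
 *              .add_port       = nvmet_foo_add_port,
 *              .remove_port    = nvmet_foo_remove_port,
 *              .queue_response = nvmet_foo_queue_response,
 *              .delete_ctrl    = nvmet_foo_delete_ctrl,
 *      };
 *
 *      ret = nvmet_register_transport(&nvmet_foo_ops);
 *
 * The nvmet_foo_* names are placeholders; the callbacks listed are the ones
 * this file actually invokes (add_port, remove_port, queue_response,
 * delete_ctrl), and nvmet_unregister_transport() undoes the registration on
 * module exit.
 */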
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
        down_write(&nvmet_config_sem);
        nvmet_transports[ops->type] = NULL;
        up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
int nvmet_enable_port(struct nvmet_port *port)
{
        struct nvmet_fabrics_ops *ops;
        int ret;

        lockdep_assert_held(&nvmet_config_sem);

        ops = nvmet_transports[port->disc_addr.trtype];
        if (!ops) {
                up_write(&nvmet_config_sem);
                request_module("nvmet-transport-%d", port->disc_addr.trtype);
                down_write(&nvmet_config_sem);
                ops = nvmet_transports[port->disc_addr.trtype];
                if (!ops) {
                        pr_err("transport type %d not supported\n",
                                port->disc_addr.trtype);
                        return -EINVAL;
                }
        }

        if (!try_module_get(ops->owner))
                return -EINVAL;

        ret = ops->add_port(port);
        if (ret) {
                module_put(ops->owner);
                return ret;
        }

        port->enabled = true;
        return 0;
}
void nvmet_disable_port(struct nvmet_port *port)
{
        struct nvmet_fabrics_ops *ops;

        lockdep_assert_held(&nvmet_config_sem);

        port->enabled = false;

        ops = nvmet_transports[port->disc_addr.trtype];
        ops->remove_port(port);
        module_put(ops->owner);
}
static void nvmet_keep_alive_timer(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);

        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);

        nvmet_ctrl_fatal_error(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
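
/*
 * Note: ctrl->kato is stored in seconds (nvmet_alloc_ctrl() converts the
 * millisecond KATO value from the connect command with
 * DIV_ROUND_UP(kato, 1000)), so a host KATO of e.g. 15000ms arms the delayed
 * work above for 15 * HZ jiffies.
 */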
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

        cancel_delayed_work_sync(&ctrl->ka_work);
}
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
                __le32 nsid)
{
        struct nvmet_ns *ns;

        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid == le32_to_cpu(nsid))
                        return ns;
        }

        return NULL;
}
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
        struct nvmet_ns *ns;

        rcu_read_lock();
        ns = __nvmet_find_namespace(ctrl, nsid);
        if (ns)
                percpu_ref_get(&ns->ref);
        rcu_read_unlock();

        return ns;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
        struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

        complete(&ns->disable_done);
}
void nvmet_put_namespace(struct nvmet_ns *ns)
{
        percpu_ref_put(&ns->ref);
}
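
/*
 * Reference counting note: nvmet_find_namespace() above takes a percpu
 * reference on the namespace it returns; request completion drops it again
 * via nvmet_put_namespace() (see __nvmet_req_complete() below), and
 * nvmet_ns_disable() waits for all such references before tearing the
 * namespace down.
 */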
int nvmet_ns_enable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;
        int ret = 0;

        mutex_lock(&subsys->lock);

        ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
                        NULL);
        if (IS_ERR(ns->bdev)) {
                pr_err("nvmet: failed to open block device %s: (%ld)\n",
                        ns->device_path, PTR_ERR(ns->bdev));
                ret = PTR_ERR(ns->bdev);
                ns->bdev = NULL;
                goto out_unlock;
        }

        ns->size = i_size_read(ns->bdev->bd_inode);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

        ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
                                0, GFP_KERNEL);
        if (ret)
                goto out_blkdev_put;

        if (ns->nsid > subsys->max_nsid)
                subsys->max_nsid = ns->nsid;

        /*
         * The namespaces list needs to be sorted to simplify the
         * implementation of the Identify Namespace List subcommand.
         */
        if (list_empty(&subsys->namespaces)) {
                list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
        } else {
                struct nvmet_ns *old;

                list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
                        BUG_ON(ns->nsid == old->nsid);
                        if (ns->nsid < old->nsid)
                                break;
                }

                list_add_tail_rcu(&ns->dev_link, &old->dev_link);
        }

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;
out_blkdev_put:
        blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
        ns->bdev = NULL;
        goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_del_rcu(&ns->dev_link);
        mutex_unlock(&subsys->lock);

        /*
         * Now that we removed the namespaces from the lookup list, we
         * can kill the percpu ref and wait for any remaining references
         * to be dropped, as well as an RCU grace period for anyone only
         * using the namespace under rcu_read_lock().  Note that we can't
         * use call_rcu here as we need to ensure the namespaces have
         * been fully destroyed before unloading the module.
         */
        percpu_ref_kill(&ns->ref);
        synchronize_rcu();
        wait_for_completion(&ns->disable_done);
        percpu_ref_exit(&ns->ref);

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

        blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
        mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
        nvmet_ns_disable(ns);

        kfree(ns->device_path);
        kfree(ns);
}
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
        struct nvmet_ns *ns;

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns)
                return NULL;

        INIT_LIST_HEAD(&ns->dev_link);
        init_completion(&ns->disable_done);

        ns->nsid = nsid;
        ns->subsys = subsys;

        return ns;
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        if (status)
                nvmet_set_status(req, status);

        /* XXX: need to fill in something useful for sq_head */
        req->rsp->sq_head = 0;
        if (likely(req->sq)) /* may happen during early failure */
                req->rsp->sq_id = cpu_to_le16(req->sq->qid);
        req->rsp->command_id = req->cmd->common.command_id;

        if (req->ns)
                nvmet_put_namespace(req->ns);
        req->ops->queue_response(req);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        __nvmet_req_complete(req, status);
        percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                u16 qid, u16 size)
{
        cq->qid = qid;
        cq->size = size;

        ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
                u16 qid, u16 size)
{
        sq->qid = qid;
        sq->size = size;

        ctrl->sqs[qid] = sq;
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
        /*
         * If this is the admin queue, complete all AERs so that our
         * queue doesn't have outstanding requests on it.
         */
        if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
                nvmet_async_events_free(sq->ctrl);
        percpu_ref_kill(&sq->ref);
        wait_for_completion(&sq->free_done);
        percpu_ref_exit(&sq->ref);

        if (sq->ctrl) {
                nvmet_ctrl_put(sq->ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
        struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

        complete(&sq->free_done);
}
int nvmet_sq_init(struct nvmet_sq *sq)
{
        int ret;

        ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
        if (ret) {
                pr_err("percpu_ref init failed!\n");
                return ret;
        }
        init_completion(&sq->free_done);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
        u8 flags = req->cmd->common.flags;
        u16 status;

        req->cq = cq;
        req->sq = sq;
        req->ops = ops;
        req->sg = NULL;
        req->sg_cnt = 0;
        req->rsp->status = 0;

        /* no support for fused commands yet */
        if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        /* either variant of SGLs is fine, as we don't support metadata */
        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
                     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (unlikely(!req->sq->ctrl))
                /* will return an error for any Non-connect command: */
                status = nvmet_parse_connect_cmd(req);
        else if (likely(req->sq->qid != 0))
                status = nvmet_parse_io_cmd(req);
        else if (req->cmd->common.opcode == nvme_fabrics_command)
                status = nvmet_parse_fabrics_cmd(req);
        else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
                status = nvmet_parse_discovery_cmd(req);
        else
                status = nvmet_parse_admin_cmd(req);

        if (status)
                goto fail;

        if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        return true;

fail:
        __nvmet_req_complete(req, status);
        return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
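
/*
 * Request lifecycle sketch (informational; transport details omitted): a
 * transport calls nvmet_sq_init() when it sets up a queue, and for each
 * received command capsule does roughly
 *
 *      if (!nvmet_req_init(req, cq, sq, &nvmet_foo_ops))
 *              return;         (the request was already completed with an
 *                               error status by nvmet_req_init)
 *      ...map data, run the handler selected by the parse step, which
 *         finishes by calling nvmet_req_complete()...
 *
 * and finally tears the queue down with nvmet_sq_destroy().  nvmet_foo_ops
 * is a placeholder for the transport's nvmet_fabrics_ops.
 */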
static inline bool nvmet_cc_en(u32 cc)
{
        return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
        return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
        return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
        return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
        return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
        return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
        return (cc >> 20) & 0xf;
}
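
/*
 * For reference, the CC register fields decoded by the helpers above (bit
 * positions match the shifts and masks used): EN bit 0, CSS bits 6:4,
 * MPS bits 10:7, AMS bits 13:11, SHN bits 15:14, IOSQES bits 19:16,
 * IOCQES bits 23:20.
 */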
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
            nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
            nvmet_cc_mps(ctrl->cc) != 0 ||
            nvmet_cc_ams(ctrl->cc) != 0 ||
            nvmet_cc_css(ctrl->cc) != 0) {
                ctrl->csts = NVME_CSTS_CFS;
                return;
        }

        ctrl->csts = NVME_CSTS_RDY;
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        /* XXX: tear down queues? */
        ctrl->csts &= ~NVME_CSTS_RDY;
}
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
        u32 old;

        mutex_lock(&ctrl->lock);
        old = ctrl->cc;
        ctrl->cc = new;

        if (nvmet_cc_en(new) && !nvmet_cc_en(old))
                nvmet_start_ctrl(ctrl);
        if (!nvmet_cc_en(new) && nvmet_cc_en(old))
                nvmet_clear_ctrl(ctrl);
        if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
                nvmet_clear_ctrl(ctrl);
                ctrl->csts |= NVME_CSTS_SHST_CMPLT;
        }
        if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
                ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
        mutex_unlock(&ctrl->lock);
}
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
        /* command sets supported: NVMe command set: */
        ctrl->cap = (1ULL << 37);
        /* CC.EN timeout in 500msec units: */
        ctrl->cap |= (15ULL << 24);
        /* maximum queue entries supported: */
        ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
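
/*
 * For reference: CAP.MQES occupies bits 15:0 (0's based), CAP.TO bits 31:24
 * (units of 500ms, so the value 15 set above advertises a 7.5 second CC.EN
 * timeout), and bit 37 of CAP.CSS advertises the NVM command set.
 */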
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
                struct nvmet_req *req, struct nvmet_ctrl **ret)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        u16 status = 0;

        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        }

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (ctrl->cntlid == cntlid) {
                        if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
                                pr_warn("hostnqn mismatch.\n");
                                continue;
                        }
                        if (!kref_get_unless_zero(&ctrl->ref))
                                continue;

                        *ret = ctrl;
                        goto out;
                }
        }

        pr_warn("could not find controller %d for subsys %s / host %s\n",
                cntlid, subsysnqn, hostnqn);
        req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
        mutex_unlock(&subsys->lock);
        nvmet_subsys_put(subsys);
        return status;
}
static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
                const char *hostnqn)
{
        struct nvmet_host_link *p;

        if (subsys->allow_any_host)
                return true;

        list_for_each_entry(p, &subsys->hosts, entry) {
                if (!strcmp(nvmet_host_name(p->host), hostnqn))
                        return true;
        }

        return false;
}
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
                const char *hostnqn)
{
        struct nvmet_subsys_link *s;

        list_for_each_entry(s, &req->port->subsystems, entry) {
                if (__nvmet_host_allowed(s->subsys, hostnqn))
                        return true;
        }

        return false;
}
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
                const char *hostnqn)
{
        lockdep_assert_held(&nvmet_config_sem);

        if (subsys->type == NVME_NQN_DISC)
                return nvmet_host_discovery_allowed(req, hostnqn);
        else
                return __nvmet_host_allowed(subsys, hostnqn);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
                struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        int ret;
        u16 status;

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                goto out;
        }

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        down_read(&nvmet_config_sem);
        if (!nvmet_host_allowed(req, subsys, hostnqn)) {
                pr_info("connect by host %s for subsystem %s not allowed\n",
                        hostnqn, subsysnqn);
                req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
                up_read(&nvmet_config_sem);
                goto out_put_subsystem;
        }
        up_read(&nvmet_config_sem);

        status = NVME_SC_INTERNAL;
        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                goto out_put_subsystem;
        mutex_init(&ctrl->lock);

        nvmet_init_cap(ctrl);

        INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
        INIT_LIST_HEAD(&ctrl->async_events);

        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

        /* generate a random serial number as our controllers are ephemeral: */
        get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;

        ctrl->cqs = kcalloc(subsys->max_qid + 1,
                        sizeof(struct nvmet_cq *),
                        GFP_KERNEL);
        if (!ctrl->cqs)
                goto out_free_ctrl;

        ctrl->sqs = kcalloc(subsys->max_qid + 1,
                        sizeof(struct nvmet_sq *),
                        GFP_KERNEL);
        if (!ctrl->sqs)
                goto out_free_cqs;

        ret = ida_simple_get(&cntlid_ida,
                             NVME_CNTLID_MIN, NVME_CNTLID_MAX,
                             GFP_KERNEL);
        if (ret < 0) {
                status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
                goto out_free_sqs;
        }
        ctrl->cntlid = ret;

        ctrl->ops = req->ops;
        if (ctrl->subsys->type == NVME_NQN_DISC) {
                /* Don't accept keep-alive timeout for discovery controllers */
                if (kato) {
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        goto out_free_sqs;
                }

                /*
                 * Discovery controllers use some arbitrary high value in
                 * order to clean up stale discovery sessions.
                 *
                 * From the latest base diff RC:
                 * "The Keep Alive command is not supported by
                 * Discovery controllers. A transport may specify a
                 * fixed Discovery controller activity timeout value
                 * (e.g., 2 minutes).  If no commands are received
                 * by a Discovery controller within that time
                 * period, the controller may perform the
                 * actions for Keep Alive Timer expiration".
                 */
                ctrl->kato = NVMET_DISC_KATO;
        } else {
                /* keep-alive timeout in seconds */
                ctrl->kato = DIV_ROUND_UP(kato, 1000);
        }
        nvmet_start_keep_alive_timer(ctrl);

        mutex_lock(&subsys->lock);
        list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
        mutex_unlock(&subsys->lock);

        *ctrlp = ctrl;
        return 0;

out_free_sqs:
        kfree(ctrl->sqs);
out_free_cqs:
        kfree(ctrl->cqs);
out_free_ctrl:
        kfree(ctrl);
out_put_subsystem:
        nvmet_subsys_put(subsys);
out:
        return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
        struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
        struct nvmet_subsys *subsys = ctrl->subsys;

        nvmet_stop_keep_alive_timer(ctrl);

        mutex_lock(&subsys->lock);
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);

        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fatal_err_work);

        ida_simple_remove(&cntlid_ida, ctrl->cntlid);
        nvmet_subsys_put(subsys);

        kfree(ctrl->sqs);
        kfree(ctrl->cqs);
        kfree(ctrl);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
        kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                        container_of(work, struct nvmet_ctrl, fatal_err_work);

        pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
        ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
        mutex_lock(&ctrl->lock);
        if (!(ctrl->csts & NVME_CSTS_CFS)) {
                ctrl->csts |= NVME_CSTS_CFS;
                INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
                schedule_work(&ctrl->fatal_err_work);
        }
        mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn)
{
        struct nvmet_subsys_link *p;

        if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
                        NVMF_NQN_SIZE)) {
                if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
                        return NULL;
                return nvmet_disc_subsys;
        }

        down_read(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry) {
                if (!strncmp(p->subsys->subsysnqn, subsysnqn,
                                NVMF_NQN_SIZE)) {
                        if (!kref_get_unless_zero(&p->subsys->ref))
                                break;
                        up_read(&nvmet_config_sem);
                        return p->subsys;
                }
        }
        up_read(&nvmet_config_sem);
        return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type)
{
        struct nvmet_subsys *subsys;

        subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
        if (!subsys)
                return NULL;

        subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */

        switch (type) {
        case NVME_NQN_NVME:
                subsys->max_qid = NVMET_NR_QUEUES;
                break;
        case NVME_NQN_DISC:
                subsys->max_qid = 0;
                break;
        default:
                pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
                kfree(subsys);
                return NULL;
        }
        subsys->type = type;
        subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
                        GFP_KERNEL);
        if (!subsys->subsysnqn) {
                kfree(subsys);
                return NULL;
        }

        kref_init(&subsys->ref);

        mutex_init(&subsys->lock);
        INIT_LIST_HEAD(&subsys->namespaces);
        INIT_LIST_HEAD(&subsys->ctrls);
        INIT_LIST_HEAD(&subsys->hosts);

        return subsys;
}
static void nvmet_subsys_free(struct kref *ref)
{
        struct nvmet_subsys *subsys =
                container_of(ref, struct nvmet_subsys, ref);

        WARN_ON_ONCE(!list_empty(&subsys->namespaces));

        kfree(subsys->subsysnqn);
        kfree(subsys);
}
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                ctrl->ops->delete_ctrl(ctrl);
        mutex_unlock(&subsys->lock);
}
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
        kref_put(&subsys->ref, nvmet_subsys_free);
}
static int __init nvmet_init(void)
{
        int error;

        error = nvmet_init_discovery();
        if (error)
                goto out;

        error = nvmet_init_configfs();
        if (error)
                goto out_exit_discovery;
        return 0;

out_exit_discovery:
        nvmet_exit_discovery();
out:
        return error;
}
static void __exit nvmet_exit(void)
{
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);

        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}
module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");