/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 * - subsystems list
 * - per-subsystem allowed hosts list
 * - allow_any_host subsystem attribute
 * - nvmet_genctr
 * - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while for reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
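
/*
 * Illustrative usage (a sketch, not code called from here): a reader such
 * as the discovery log page builder takes the semaphore shared,
 *
 *	down_read(&nvmet_config_sem);
 *	... walk port->subsystems and subsys->hosts ...
 *	up_read(&nvmet_config_sem);
 *
 * while configuration updates take it exclusively, as
 * nvmet_register_transport() below does with down_write()/up_write().
 */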

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

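/*
 * The Asynchronous Event completion carries its payload in Dword 0 of the
 * completion entry: event type in bits 7:0, event information in bits 15:8
 * and the associated log page identifier in bits 23:16. As a worked example,
 * a Notice event (type 0x2) referencing the Changed Namespace List log page
 * (0x04) would yield a result of 0x00040002.
 */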
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

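/*
 * AENs generated by the target are queued on ctrl->async_events, while the
 * host's outstanding Asynchronous Event Request commands are stashed in
 * ctrl->async_event_cmds. This work item pairs the two, completing one AER
 * per queued event with the result encoded as described above.
 */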
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

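/*
 * ctrl->kato is kept in seconds (the Fabrics KATO connect parameter is given
 * in milliseconds and rounded up at controller allocation time), while
 * schedule_delayed_work() takes jiffies, hence the kato * HZ conversion.
 */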
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;
	uuid_gen(&ns->uuid);

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

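/*
 * Submission queue teardown is a two-stage handshake: confirm_done signals
 * that the percpu ref has been switched to atomic mode, so no new requests
 * can obtain a reference, and free_done signals that the last outstanding
 * reference has been dropped.
 */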
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

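/*
 * Accessors for the bit fields of the Controller Configuration (CC)
 * register as laid out in the NVMe base specification. As a worked example,
 * a typical host enable value of 0x00460001 decodes to EN = 1, CSS = 0
 * (NVM command set), MPS = 0 (4KiB pages), AMS = 0, SHN = 0, IOSQES = 6
 * (64-byte SQ entries) and IOCQES = 4 (16-byte CQ entries).
 */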
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

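/*
 * Build the Controller Capabilities (CAP) register value. Assuming a
 * NVMET_QUEUE_SIZE of 1024, this yields (1ULL << 37) | (15ULL << 24) | 1023:
 * NVM command set support, a 7.5 second CC.EN timeout (15 units of 500
 * msec) and MQES = 1023 (queue sizes are reported as 0's based values).
 */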
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");