/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
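
/*
 * A minimal sketch of the intended usage; the callers shown here are
 * hypothetical, the real ones live in the configfs and discovery code:
 *
 *	down_write(&nvmet_config_sem);
 *	... modify subsystem/host lists, bump nvmet_genctr ...
 *	up_write(&nvmet_config_sem);
 *
 *	down_read(&nvmet_config_sem);
 *	... populate the discovery log page ...
 *	up_read(&nvmet_config_sem);
 */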

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

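/*
 * Completion dword 0 layout for an Asynchronous Event, as packed by
 * nvmet_async_event_result() below: byte 0 carries the event type,
 * byte 1 the event information and byte 2 the associated log page
 * identifier.
 */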
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
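		/*
		 * Drop the config semaphore while the transport module
		 * loads: its init routine registers itself through
		 * nvmet_register_transport(), which takes nvmet_config_sem
		 * for writing and would otherwise deadlock against us.
		 */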
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

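	/*
	 * Let every controller on the subsystem know that the namespace
	 * map changed (an AER notice; event info 0h is Namespace
	 * Attribute Changed in the NVMe spec).
	 */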
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
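
	/*
	 * Two-stage teardown: nvmet_confirm_sq() completes once the ref
	 * has been marked dead, so percpu_ref_tryget_live() in
	 * nvmet_req_init() can no longer succeed, and free_done completes
	 * once the last in-flight request drops its reference.
	 */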
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

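	/*
	 * Command dispatch: a queue without a controller only accepts
	 * Connect; a non-zero qid carries I/O commands; on the admin
	 * queue, fabrics, discovery and regular admin commands are told
	 * apart by opcode and subsystem type.
	 */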
	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-Connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

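/*
 * The helpers below decode the Controller Configuration (CC) register
 * fields as defined by the NVMe specification: EN is bit 0, CSS bits
 * 6:4, MPS bits 10:7, AMS bits 13:11, SHN bits 15:14, IOSQES bits
 * 19:16 and IOCQES bits 23:20.
 */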
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

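/*
 * Apply a host write to the CC register: an EN 0 -> 1 transition
 * starts the controller, 1 -> 0 resets it, and setting SHN initiates a
 * shutdown that is reported back through CSTS.SHST.
 */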
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

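/*
 * The CAP value built below follows the NVMe register layout: bit 37
 * selects the NVM command set in CSS, bits 31:24 are the CC.EN
 * transition timeout (TO) in 500ms units (15 here, i.e. 7.5 seconds),
 * and bits 15:0 are MQES, the maximum queue entries supported as a
 * 0's based value.
 */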
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

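	/*
	 * The well-known discovery NQN always resolves to the singleton
	 * discovery subsystem, no matter which port the request arrived
	 * on.
	 */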
	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");