git.proxmox.com — mirror_ubuntu-zesty-kernel.git — drivers/nvme/host/core.c (blame at "nvme: introduce struct nvme_request")
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/blkdev.h>
16#include <linux/blk-mq.h>
17#include <linux/delay.h>
18#include <linux/errno.h>
19#include <linux/hdreg.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/list_sort.h>
23#include <linux/slab.h>
24#include <linux/types.h>
25#include <linux/pr.h>
26#include <linux/ptrace.h>
27#include <linux/nvme_ioctl.h>
28#include <linux/t10-pi.h>
29#include <scsi/sg.h>
30#include <asm/unaligned.h>
31
32#include "nvme.h"
33#include "fabrics.h"
34
35#define NVME_MINORS (1U << MINORBITS)
36
37unsigned char admin_timeout = 60;
38module_param(admin_timeout, byte, 0644);
39MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
40EXPORT_SYMBOL_GPL(admin_timeout);
41
42unsigned char nvme_io_timeout = 30;
43module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
44MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
45EXPORT_SYMBOL_GPL(nvme_io_timeout);
46
47unsigned char shutdown_timeout = 5;
48module_param(shutdown_timeout, byte, 0644);
49MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
50
51unsigned int nvme_max_retries = 5;
52module_param_named(max_retries, nvme_max_retries, uint, 0644);
53MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
54EXPORT_SYMBOL_GPL(nvme_max_retries);
55
56static int nvme_char_major;
57module_param(nvme_char_major, int, 0);
58
59static LIST_HEAD(nvme_ctrl_list);
60static DEFINE_SPINLOCK(dev_list_lock);
61
62static struct class *nvme_class;
63
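/*
 * Callback for cancelling an outstanding request, e.g. while iterating a
 * transport's busy tags during teardown: the request is completed with
 * NVME_SC_ABORT_REQ, and the Do Not Retry bit is added when the queue is
 * already dying so the command is not retried.
 */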
64void nvme_cancel_request(struct request *req, void *data, bool reserved)
65{
66 int status;
67
68 if (!blk_mq_request_started(req))
69 return;
70
71 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
72 "Cancelling I/O %d", req->tag);
73
74 status = NVME_SC_ABORT_REQ;
75 if (blk_queue_dying(req->q))
76 status |= NVME_SC_DNR;
77 blk_mq_complete_request(req, status);
78}
79EXPORT_SYMBOL_GPL(nvme_cancel_request);
80
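/*
 * Controller state machine: a transition to @new_state is only permitted
 * from a matching set of old states (e.g. LIVE is reachable from NEW,
 * RESETTING and RECONNECTING). Returns true and updates ctrl->state under
 * ctrl->lock when the transition is valid, false otherwise.
 */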
81bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
82 enum nvme_ctrl_state new_state)
83{
84 enum nvme_ctrl_state old_state;
85 bool changed = false;
86
87 spin_lock_irq(&ctrl->lock);
88
89 old_state = ctrl->state;
90 switch (new_state) {
91 case NVME_CTRL_LIVE:
92 switch (old_state) {
93 case NVME_CTRL_NEW:
94 case NVME_CTRL_RESETTING:
95 case NVME_CTRL_RECONNECTING:
96 changed = true;
97 /* FALLTHRU */
98 default:
99 break;
100 }
101 break;
102 case NVME_CTRL_RESETTING:
103 switch (old_state) {
104 case NVME_CTRL_NEW:
105 case NVME_CTRL_LIVE:
106 case NVME_CTRL_RECONNECTING:
107 changed = true;
108 /* FALLTHRU */
109 default:
110 break;
111 }
112 break;
113 case NVME_CTRL_RECONNECTING:
114 switch (old_state) {
115 case NVME_CTRL_LIVE:
116 changed = true;
117 /* FALLTHRU */
118 default:
119 break;
120 }
121 break;
122 case NVME_CTRL_DELETING:
123 switch (old_state) {
124 case NVME_CTRL_LIVE:
125 case NVME_CTRL_RESETTING:
126 case NVME_CTRL_RECONNECTING:
127 changed = true;
128 /* FALLTHRU */
129 default:
130 break;
131 }
132 break;
133 case NVME_CTRL_DEAD:
134 switch (old_state) {
135 case NVME_CTRL_DELETING:
136 changed = true;
137 /* FALLTHRU */
138 default:
139 break;
140 }
141 break;
142 default:
143 break;
144 }
145
146 if (changed)
147 ctrl->state = new_state;
148
149 spin_unlock_irq(&ctrl->lock);
150
151 return changed;
152}
153EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
154
155static void nvme_free_ns(struct kref *kref)
156{
157 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
158
159 if (ns->ndev)
160 nvme_nvm_unregister(ns);
161
162 if (ns->disk) {
163 spin_lock(&dev_list_lock);
164 ns->disk->private_data = NULL;
165 spin_unlock(&dev_list_lock);
166 }
167
168 put_disk(ns->disk);
169 ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
170 nvme_put_ctrl(ns->ctrl);
171 kfree(ns);
172}
173
174static void nvme_put_ns(struct nvme_ns *ns)
175{
176 kref_put(&ns->kref, nvme_free_ns);
177}
178
179static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
180{
181 struct nvme_ns *ns;
182
183 spin_lock(&dev_list_lock);
184 ns = disk->private_data;
185 if (ns) {
186 if (!kref_get_unless_zero(&ns->kref))
187 goto fail;
188 if (!try_module_get(ns->ctrl->ops->module))
189 goto fail_put_ns;
190 }
191 spin_unlock(&dev_list_lock);
192
193 return ns;
194
195fail_put_ns:
196 kref_put(&ns->kref, nvme_free_ns);
197fail:
198 spin_unlock(&dev_list_lock);
199 return NULL;
200}
201
202void nvme_requeue_req(struct request *req)
203{
204 blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
205}
206EXPORT_SYMBOL_GPL(nvme_requeue_req);
207
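/*
 * Allocate a request carrying an NVMe command. With qid == NVME_QID_ANY
 * the request may run on any queue; otherwise it is bound to the given
 * hardware context. A typical caller (compare __nvme_submit_sync_cmd()
 * below) looks roughly like:
 *
 *	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->timeout = ADMIN_TIMEOUT;
 *	blk_execute_rq(req->q, NULL, req, 0);
 *
 * The command itself is only referenced via nvme_req(req)->cmd here and is
 * copied into the SQE later by nvme_setup_cmd().
 */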
208struct request *nvme_alloc_request(struct request_queue *q,
209 struct nvme_command *cmd, unsigned int flags, int qid)
210{
211 struct request *req;
212
213 if (qid == NVME_QID_ANY) {
214 req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
215 } else {
216 req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
217 qid ? qid - 1 : 0);
218 }
219 if (IS_ERR(req))
220 return req;
221
222 req->cmd_type = REQ_TYPE_DRV_PRIV;
223 req->cmd_flags |= REQ_FAILFAST_DRIVER;
224 nvme_req(req)->cmd = cmd;
225
226 return req;
227}
228EXPORT_SYMBOL_GPL(nvme_alloc_request);
229
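/*
 * The nvme_setup_* helpers below translate a block layer request into the
 * matching NVMe command: a flush, a DSM deallocate (discard) or a
 * read/write including FUA, limited-retry and protection-information
 * settings. nvme_setup_cmd() dispatches between them by request type.
 */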
230static inline void nvme_setup_flush(struct nvme_ns *ns,
231 struct nvme_command *cmnd)
232{
233 memset(cmnd, 0, sizeof(*cmnd));
234 cmnd->common.opcode = nvme_cmd_flush;
235 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
236}
237
238static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
239 struct nvme_command *cmnd)
240{
241 struct nvme_dsm_range *range;
242 struct page *page;
243 int offset;
244 unsigned int nr_bytes = blk_rq_bytes(req);
245
246 range = kmalloc(sizeof(*range), GFP_ATOMIC);
247 if (!range)
248 return BLK_MQ_RQ_QUEUE_BUSY;
249
250 range->cattr = cpu_to_le32(0);
251 range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
252 range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
253
254 memset(cmnd, 0, sizeof(*cmnd));
255 cmnd->dsm.opcode = nvme_cmd_dsm;
256 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
257 cmnd->dsm.nr = 0;
258 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
259
260 req->completion_data = range;
261 page = virt_to_page(range);
262 offset = offset_in_page(range);
263 blk_add_request_payload(req, page, offset, sizeof(*range));
264
265 /*
266 * we set __data_len back to the size of the area to be discarded
267 * on disk. This allows us to report completion on the full amount
268 * of blocks described by the request.
269 */
270 req->__data_len = nr_bytes;
271
272 return 0;
273}
274
275static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
276 struct nvme_command *cmnd)
277{
278 u16 control = 0;
279 u32 dsmgmt = 0;
280
281 if (req->cmd_flags & REQ_FUA)
282 control |= NVME_RW_FUA;
283 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
284 control |= NVME_RW_LR;
285
286 if (req->cmd_flags & REQ_RAHEAD)
287 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
288
289 memset(cmnd, 0, sizeof(*cmnd));
290 cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
291 cmnd->rw.command_id = req->tag;
292 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
293 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
294 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
295
296 if (ns->ms) {
297 switch (ns->pi_type) {
298 case NVME_NS_DPS_PI_TYPE3:
299 control |= NVME_RW_PRINFO_PRCHK_GUARD;
300 break;
301 case NVME_NS_DPS_PI_TYPE1:
302 case NVME_NS_DPS_PI_TYPE2:
303 control |= NVME_RW_PRINFO_PRCHK_GUARD |
304 NVME_RW_PRINFO_PRCHK_REF;
305 cmnd->rw.reftag = cpu_to_le32(
306 nvme_block_nr(ns, blk_rq_pos(req)));
307 break;
308 }
309 if (!blk_integrity_rq(req))
310 control |= NVME_RW_PRINFO_PRACT;
311 }
312
313 cmnd->rw.control = cpu_to_le16(control);
314 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
315}
316
317int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
318 struct nvme_command *cmd)
319{
320 int ret = 0;
321
322 if (req->cmd_type == REQ_TYPE_DRV_PRIV)
323 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
324 else if (req_op(req) == REQ_OP_FLUSH)
325 nvme_setup_flush(ns, cmd);
326 else if (req_op(req) == REQ_OP_DISCARD)
327 ret = nvme_setup_discard(ns, req, cmd);
328 else
329 nvme_setup_rw(ns, req, cmd);
330
331 return ret;
332}
333EXPORT_SYMBOL_GPL(nvme_setup_cmd);
334
335/*
336 * Returns 0 on success. If the result is negative, it's a Linux error code;
337 * if the result is positive, it's an NVM Express status code
338 */
339int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
340 union nvme_result *result, void *buffer, unsigned bufflen,
341 unsigned timeout, int qid, int at_head, int flags)
342{
343 struct request *req;
344 int ret;
345
346 req = nvme_alloc_request(q, cmd, flags, qid);
347 if (IS_ERR(req))
348 return PTR_ERR(req);
349
350 req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
351
352 if (buffer && bufflen) {
353 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
354 if (ret)
355 goto out;
356 }
357
358 blk_execute_rq(req->q, NULL, req, at_head);
359 if (result)
360 *result = nvme_req(req)->result;
361 ret = req->errors;
362 out:
363 blk_mq_free_request(req);
364 return ret;
365}
366EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
367
368int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
369 void *buffer, unsigned bufflen)
370{
371 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
372 NVME_QID_ANY, 0, 0);
373}
374EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
375
376int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
377 void __user *ubuffer, unsigned bufflen,
378 void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
379 u32 *result, unsigned timeout)
380{
381 bool write = nvme_is_write(cmd);
382 struct nvme_ns *ns = q->queuedata;
383 struct gendisk *disk = ns ? ns->disk : NULL;
384 struct request *req;
385 struct bio *bio = NULL;
386 void *meta = NULL;
387 int ret;
388
389 req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
390 if (IS_ERR(req))
391 return PTR_ERR(req);
392
393 req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
394
395 if (ubuffer && bufflen) {
396 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
397 GFP_KERNEL);
398 if (ret)
399 goto out;
400 bio = req->bio;
401
402 if (!disk)
403 goto submit;
404 bio->bi_bdev = bdget_disk(disk, 0);
405 if (!bio->bi_bdev) {
406 ret = -ENODEV;
407 goto out_unmap;
408 }
409
410 if (meta_buffer && meta_len) {
411 struct bio_integrity_payload *bip;
412
413 meta = kmalloc(meta_len, GFP_KERNEL);
414 if (!meta) {
415 ret = -ENOMEM;
416 goto out_unmap;
417 }
418
419 if (write) {
420 if (copy_from_user(meta, meta_buffer,
421 meta_len)) {
422 ret = -EFAULT;
423 goto out_free_meta;
424 }
425 }
426
427 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
428 if (IS_ERR(bip)) {
429 ret = PTR_ERR(bip);
430 goto out_free_meta;
431 }
432
433 bip->bip_iter.bi_size = meta_len;
434 bip->bip_iter.bi_sector = meta_seed;
435
436 ret = bio_integrity_add_page(bio, virt_to_page(meta),
437 meta_len, offset_in_page(meta));
438 if (ret != meta_len) {
439 ret = -ENOMEM;
440 goto out_free_meta;
441 }
442 }
443 }
444 submit:
445 blk_execute_rq(req->q, disk, req, 0);
446 ret = req->errors;
447 if (result)
448 *result = le32_to_cpu(nvme_req(req)->result.u32);
449 if (meta && !ret && !write) {
450 if (copy_to_user(meta_buffer, meta, meta_len))
451 ret = -EFAULT;
452 }
453 out_free_meta:
454 kfree(meta);
455 out_unmap:
456 if (bio) {
457 if (disk && bio->bi_bdev)
458 bdput(bio->bi_bdev);
459 blk_rq_unmap_user(bio);
460 }
461 out:
462 blk_mq_free_request(req);
463 return ret;
464}
465
466int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
467 void __user *ubuffer, unsigned bufflen, u32 *result,
468 unsigned timeout)
469{
470 return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
471 result, timeout);
472}
473
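/*
 * Keep Alive support: while ctrl->kato is non-zero a delayed work item
 * resubmits a Keep Alive admin command every kato seconds. If the command
 * cannot even be allocated the controller is reset, as the association is
 * presumed lost (keep-alive is mandatory for fabrics controllers, see
 * nvme_init_identify() below).
 */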
474static void nvme_keep_alive_end_io(struct request *rq, int error)
475{
476 struct nvme_ctrl *ctrl = rq->end_io_data;
477
478 blk_mq_free_request(rq);
479
480 if (error) {
481 dev_err(ctrl->device,
482 "failed nvme_keep_alive_end_io error=%d\n", error);
483 return;
484 }
485
486 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
487}
488
489static int nvme_keep_alive(struct nvme_ctrl *ctrl)
490{
491 struct nvme_command c;
492 struct request *rq;
493
494 memset(&c, 0, sizeof(c));
495 c.common.opcode = nvme_admin_keep_alive;
496
497 rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
498 NVME_QID_ANY);
499 if (IS_ERR(rq))
500 return PTR_ERR(rq);
501
502 rq->timeout = ctrl->kato * HZ;
503 rq->end_io_data = ctrl;
504
505 blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
506
507 return 0;
508}
509
510static void nvme_keep_alive_work(struct work_struct *work)
511{
512 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
513 struct nvme_ctrl, ka_work);
514
515 if (nvme_keep_alive(ctrl)) {
516 /* allocation failure, reset the controller */
517 dev_err(ctrl->device, "keep-alive failed\n");
518 ctrl->ops->reset_ctrl(ctrl);
519 return;
520 }
521}
522
523void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
524{
525 if (unlikely(ctrl->kato == 0))
526 return;
527
528 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
529 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
530}
531EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
532
533void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
534{
535 if (unlikely(ctrl->kato == 0))
536 return;
537
538 cancel_delayed_work_sync(&ctrl->ka_work);
539}
540EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
541
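/*
 * Identify helpers: thin wrappers around the Identify admin command, using
 * CNS 1 for the controller data structure, CNS 2 for the active namespace
 * ID list and the default CNS 0 for a single namespace. The controller and
 * namespace variants allocate the returned structure; the caller frees it.
 */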
542int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
543{
544 struct nvme_command c = { };
545 int error;
546
547 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
548 c.identify.opcode = nvme_admin_identify;
549 c.identify.cns = cpu_to_le32(1);
550
551 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
552 if (!*id)
553 return -ENOMEM;
554
555 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
556 sizeof(struct nvme_id_ctrl));
557 if (error)
558 kfree(*id);
559 return error;
560}
561
562static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
563{
564 struct nvme_command c = { };
565
566 c.identify.opcode = nvme_admin_identify;
567 c.identify.cns = cpu_to_le32(2);
568 c.identify.nsid = cpu_to_le32(nsid);
569 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
570}
571
572int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
573 struct nvme_id_ns **id)
574{
575 struct nvme_command c = { };
576 int error;
577
578 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
579 c.identify.opcode = nvme_admin_identify,
580 c.identify.nsid = cpu_to_le32(nsid),
581
582 *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
583 if (!*id)
584 return -ENOMEM;
585
586 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
587 sizeof(struct nvme_id_ns));
588 if (error)
589 kfree(*id);
590 return error;
591}
592
593int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
594 void *buffer, size_t buflen, u32 *result)
595{
596 struct nvme_command c;
597 union nvme_result res;
598 int ret;
599
600 memset(&c, 0, sizeof(c));
601 c.features.opcode = nvme_admin_get_features;
602 c.features.nsid = cpu_to_le32(nsid);
603 c.features.fid = cpu_to_le32(fid);
604
605 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
606 NVME_QID_ANY, 0, 0);
607 if (ret >= 0 && result)
608 *result = le32_to_cpu(res.u32);
609 return ret;
610}
611
612int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
613 void *buffer, size_t buflen, u32 *result)
614{
615 struct nvme_command c;
616 union nvme_result res;
617 int ret;
618
619 memset(&c, 0, sizeof(c));
620 c.features.opcode = nvme_admin_set_features;
621 c.features.fid = cpu_to_le32(fid);
622 c.features.dword11 = cpu_to_le32(dword11);
623
624 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
625 buffer, buflen, 0, NVME_QID_ANY, 0, 0);
626 if (ret >= 0 && result)
627 *result = le32_to_cpu(res.u32);
628 return ret;
629}
630
631int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
632{
633 struct nvme_command c = { };
634 int error;
635
636 c.common.opcode = nvme_admin_get_log_page,
637 c.common.nsid = cpu_to_le32(0xFFFFFFFF),
638 c.common.cdw10[0] = cpu_to_le32(
639 (((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
640 NVME_LOG_SMART),
641
642 *log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
643 if (!*log)
644 return -ENOMEM;
645
646 error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
647 sizeof(struct nvme_smart_log));
648 if (error)
649 kfree(*log);
650 return error;
651}
652
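/*
 * Set Features/Number of Queues: the feature value encodes the requested
 * submission and completion queue counts as zero-based 16-bit fields, i.e.
 * for *count queues both halves are set to (*count - 1). On success *count
 * is clamped to what the controller actually granted; an NVMe error status
 * is reported but deliberately not treated as fatal.
 */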
653int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
654{
655 u32 q_count = (*count - 1) | ((*count - 1) << 16);
656 u32 result;
657 int status, nr_io_queues;
658
659 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
660 &result);
661 if (status < 0)
662 return status;
663
664 /*
665 * Degraded controllers might return an error when setting the queue
666 * count. We still want to be able to bring them online and offer
667 * access to the admin queue, as that might be the only way to fix them up.
668 */
669 if (status > 0) {
670 dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
671 *count = 0;
672 } else {
673 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
674 *count = min(*count, nr_io_queues);
675 }
676
677 return 0;
678}
679EXPORT_SYMBOL_GPL(nvme_set_queue_count);
680
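/*
 * NVME_IOCTL_SUBMIT_IO: validate a user supplied struct nvme_user_io (only
 * read, write and compare opcodes are accepted), build the corresponding
 * nvme_command and hand it to __nvme_submit_user_cmd() together with the
 * user data buffer and, for separate-metadata formats, the metadata buffer.
 */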
681static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
682{
683 struct nvme_user_io io;
684 struct nvme_command c;
685 unsigned length, meta_len;
686 void __user *metadata;
687
688 if (copy_from_user(&io, uio, sizeof(io)))
689 return -EFAULT;
690 if (io.flags)
691 return -EINVAL;
692
693 switch (io.opcode) {
694 case nvme_cmd_write:
695 case nvme_cmd_read:
696 case nvme_cmd_compare:
697 break;
698 default:
699 return -EINVAL;
700 }
701
702 length = (io.nblocks + 1) << ns->lba_shift;
703 meta_len = (io.nblocks + 1) * ns->ms;
704 metadata = (void __user *)(uintptr_t)io.metadata;
705
706 if (ns->ext) {
707 length += meta_len;
708 meta_len = 0;
709 } else if (meta_len) {
710 if ((io.metadata & 3) || !io.metadata)
711 return -EINVAL;
712 }
713
714 memset(&c, 0, sizeof(c));
715 c.rw.opcode = io.opcode;
716 c.rw.flags = io.flags;
717 c.rw.nsid = cpu_to_le32(ns->ns_id);
718 c.rw.slba = cpu_to_le64(io.slba);
719 c.rw.length = cpu_to_le16(io.nblocks);
720 c.rw.control = cpu_to_le16(io.control);
721 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
722 c.rw.reftag = cpu_to_le32(io.reftag);
723 c.rw.apptag = cpu_to_le16(io.apptag);
724 c.rw.appmask = cpu_to_le16(io.appmask);
725
726 return __nvme_submit_user_cmd(ns->queue, &c,
727 (void __user *)(uintptr_t)io.addr, length,
728 metadata, meta_len, io.slba, NULL, 0);
729}
730
731static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
732 struct nvme_passthru_cmd __user *ucmd)
733{
734 struct nvme_passthru_cmd cmd;
735 struct nvme_command c;
736 unsigned timeout = 0;
737 int status;
738
739 if (!capable(CAP_SYS_ADMIN))
740 return -EACCES;
741 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
742 return -EFAULT;
743 if (cmd.flags)
744 return -EINVAL;
745
746 memset(&c, 0, sizeof(c));
747 c.common.opcode = cmd.opcode;
748 c.common.flags = cmd.flags;
749 c.common.nsid = cpu_to_le32(cmd.nsid);
750 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
751 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
752 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
753 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
754 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
755 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
756 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
757 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
758
759 if (cmd.timeout_ms)
760 timeout = msecs_to_jiffies(cmd.timeout_ms);
761
762 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
763 (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
764 &cmd.result, timeout);
765 if (status >= 0) {
766 if (put_user(cmd.result, &ucmd->result))
767 return -EFAULT;
768 }
769
770 return status;
771}
772
773static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
774 unsigned int cmd, unsigned long arg)
775{
776 struct nvme_ns *ns = bdev->bd_disk->private_data;
777
778 switch (cmd) {
779 case NVME_IOCTL_ID:
780 force_successful_syscall_return();
781 return ns->ns_id;
782 case NVME_IOCTL_ADMIN_CMD:
783 return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
784 case NVME_IOCTL_IO_CMD:
785 return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
786 case NVME_IOCTL_SUBMIT_IO:
787 return nvme_submit_io(ns, (void __user *)arg);
788#ifdef CONFIG_BLK_DEV_NVME_SCSI
789 case SG_GET_VERSION_NUM:
790 return nvme_sg_get_version_num((void __user *)arg);
791 case SG_IO:
792 return nvme_sg_io(ns, (void __user *)arg);
793#endif
794 default:
795 return -ENOTTY;
796 }
797}
798
799#ifdef CONFIG_COMPAT
800static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
801 unsigned int cmd, unsigned long arg)
802{
803 switch (cmd) {
804 case SG_IO:
805 return -ENOIOCTLCMD;
806 }
807 return nvme_ioctl(bdev, mode, cmd, arg);
808}
809#else
810#define nvme_compat_ioctl NULL
811#endif
812
813static int nvme_open(struct block_device *bdev, fmode_t mode)
814{
815 return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
816}
817
818static void nvme_release(struct gendisk *disk, fmode_t mode)
819{
820 struct nvme_ns *ns = disk->private_data;
821
822 module_put(ns->ctrl->ops->module);
823 nvme_put_ns(ns);
824}
825
826static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
827{
828 /* some standard values */
829 geo->heads = 1 << 6;
830 geo->sectors = 1 << 5;
831 geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
832 return 0;
833}
834
835#ifdef CONFIG_BLK_DEV_INTEGRITY
836static void nvme_init_integrity(struct nvme_ns *ns)
837{
838 struct blk_integrity integrity;
839
840 memset(&integrity, 0, sizeof(integrity));
841 switch (ns->pi_type) {
842 case NVME_NS_DPS_PI_TYPE3:
843 integrity.profile = &t10_pi_type3_crc;
844 integrity.tag_size = sizeof(u16) + sizeof(u32);
845 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
846 break;
847 case NVME_NS_DPS_PI_TYPE1:
848 case NVME_NS_DPS_PI_TYPE2:
849 integrity.profile = &t10_pi_type1_crc;
850 integrity.tag_size = sizeof(u16);
851 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
852 break;
853 default:
854 integrity.profile = NULL;
855 break;
856 }
857 integrity.tuple_size = ns->ms;
858 blk_integrity_register(ns->disk, &integrity);
859 blk_queue_max_integrity_segments(ns->queue, 1);
860}
861#else
862static void nvme_init_integrity(struct nvme_ns *ns)
863{
864}
865#endif /* CONFIG_BLK_DEV_INTEGRITY */
866
867static void nvme_config_discard(struct nvme_ns *ns)
868{
869 struct nvme_ctrl *ctrl = ns->ctrl;
870 u32 logical_block_size = queue_logical_block_size(ns->queue);
871
872 if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
873 ns->queue->limits.discard_zeroes_data = 1;
874 else
875 ns->queue->limits.discard_zeroes_data = 0;
876
877 ns->queue->limits.discard_alignment = logical_block_size;
878 ns->queue->limits.discard_granularity = logical_block_size;
879 blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
880 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
881}
882
883static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
884{
885 if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
886 dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
887 return -ENODEV;
888 }
889
890 if ((*id)->ncap == 0) {
891 kfree(*id);
892 return -ENODEV;
893 }
894
895 if (ns->ctrl->vs >= NVME_VS(1, 1))
896 memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
897 if (ns->ctrl->vs >= NVME_VS(1, 2))
898 memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
899
900 return 0;
901}
902
903static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
904{
905 struct nvme_ns *ns = disk->private_data;
906 u8 lbaf, pi_type;
907 u16 old_ms;
908 unsigned short bs;
909
910 old_ms = ns->ms;
911 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
912 ns->lba_shift = id->lbaf[lbaf].ds;
913 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
914 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
915
916 /*
917 * If identify namespace failed, use default 512 byte block size so
918 * block layer can use before failing read/write for 0 capacity.
919 */
920 if (ns->lba_shift == 0)
921 ns->lba_shift = 9;
922 bs = 1 << ns->lba_shift;
923 /* XXX: PI implementation requires metadata equal t10 pi tuple size */
924 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
925 id->dps & NVME_NS_DPS_PI_MASK : 0;
926
927 blk_mq_freeze_queue(disk->queue);
928 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
929 ns->ms != old_ms ||
930 bs != queue_logical_block_size(disk->queue) ||
931 (ns->ms && ns->ext)))
932 blk_integrity_unregister(disk);
933
934 ns->pi_type = pi_type;
935 blk_queue_logical_block_size(ns->queue, bs);
936
937 if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
938 nvme_init_integrity(ns);
939 if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
940 set_capacity(disk, 0);
941 else
942 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
943
944 if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
945 nvme_config_discard(ns);
946 blk_mq_unfreeze_queue(disk->queue);
947}
948
949static int nvme_revalidate_disk(struct gendisk *disk)
950{
951 struct nvme_ns *ns = disk->private_data;
952 struct nvme_id_ns *id = NULL;
953 int ret;
954
955 if (test_bit(NVME_NS_DEAD, &ns->flags)) {
956 set_capacity(disk, 0);
957 return -ENODEV;
958 }
959
960 ret = nvme_revalidate_ns(ns, &id);
961 if (ret)
962 return ret;
963
964 __nvme_revalidate_disk(disk, id);
965 kfree(id);
966
967 return 0;
968}
969
970static char nvme_pr_type(enum pr_type type)
971{
972 switch (type) {
973 case PR_WRITE_EXCLUSIVE:
974 return 1;
975 case PR_EXCLUSIVE_ACCESS:
976 return 2;
977 case PR_WRITE_EXCLUSIVE_REG_ONLY:
978 return 3;
979 case PR_EXCLUSIVE_ACCESS_REG_ONLY:
980 return 4;
981 case PR_WRITE_EXCLUSIVE_ALL_REGS:
982 return 5;
983 case PR_EXCLUSIVE_ACCESS_ALL_REGS:
984 return 6;
985 default:
986 return 0;
987 }
988};
989
990static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
991 u64 key, u64 sa_key, u8 op)
992{
993 struct nvme_ns *ns = bdev->bd_disk->private_data;
994 struct nvme_command c;
995 u8 data[16] = { 0, };
996
997 put_unaligned_le64(key, &data[0]);
998 put_unaligned_le64(sa_key, &data[8]);
999
1000 memset(&c, 0, sizeof(c));
1001 c.common.opcode = op;
1002 c.common.nsid = cpu_to_le32(ns->ns_id);
1003 c.common.cdw10[0] = cpu_to_le32(cdw10);
1004
1005 return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
1006}
1007
1008static int nvme_pr_register(struct block_device *bdev, u64 old,
1009 u64 new, unsigned flags)
1010{
1011 u32 cdw10;
1012
1013 if (flags & ~PR_FL_IGNORE_KEY)
1014 return -EOPNOTSUPP;
1015
1016 cdw10 = old ? 2 : 0;
1017 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
1018 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
1019 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
1020}
1021
1022static int nvme_pr_reserve(struct block_device *bdev, u64 key,
1023 enum pr_type type, unsigned flags)
1024{
1025 u32 cdw10;
1026
1027 if (flags & ~PR_FL_IGNORE_KEY)
1028 return -EOPNOTSUPP;
1029
1030 cdw10 = nvme_pr_type(type) << 8;
1031 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
1032 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
1033}
1034
1035static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
1036 enum pr_type type, bool abort)
1037{
1038 u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
1039 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
1040}
1041
1042static int nvme_pr_clear(struct block_device *bdev, u64 key)
1043{
1044 u32 cdw10 = 1 | (key ? 1 << 3 : 0);
1045 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
1046}
1047
1048static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1049{
1050 u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
1051 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1052}
1053
1054static const struct pr_ops nvme_pr_ops = {
1055 .pr_register = nvme_pr_register,
1056 .pr_reserve = nvme_pr_reserve,
1057 .pr_release = nvme_pr_release,
1058 .pr_preempt = nvme_pr_preempt,
1059 .pr_clear = nvme_pr_clear,
1060};
1061
1062static const struct block_device_operations nvme_fops = {
1063 .owner = THIS_MODULE,
1064 .ioctl = nvme_ioctl,
1065 .compat_ioctl = nvme_compat_ioctl,
1066 .open = nvme_open,
1067 .release = nvme_release,
1068 .getgeo = nvme_getgeo,
1069 .revalidate_disk= nvme_revalidate_disk,
1070 .pr_ops = &nvme_pr_ops,
1071};
1072
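/*
 * Poll CSTS.RDY until it matches the expected value after enabling or
 * disabling the controller, giving up after the CAP.TO timeout (reported
 * in 500ms units) expires or a fatal signal is pending.
 */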
1073static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1074{
1075 unsigned long timeout =
1076 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1077 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
1078 int ret;
1079
1080 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1081 if ((csts & NVME_CSTS_RDY) == bit)
1082 break;
1083
1084 msleep(100);
1085 if (fatal_signal_pending(current))
1086 return -EINTR;
1087 if (time_after(jiffies, timeout)) {
1088 dev_err(ctrl->device,
1089 "Device not ready; aborting %s\n", enabled ?
1090 "initialisation" : "reset");
1091 return -ENODEV;
1092 }
1093 }
1094
1095 return ret;
1096}
1097
1098/*
1099 * If the device has been passed off to us in an enabled state, just clear
1100 * the enabled bit. The spec says we should set the 'shutdown notification
1101 * bits', but doing so may cause the device to complete commands to the
1102 * admin queue ... and we don't know what memory that might be pointing at!
1103 */
1104int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1105{
1106 int ret;
1107
1108 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1109 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
1110
1111 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1112 if (ret)
1113 return ret;
1114
1115 /* Checking for ctrl->tagset is a trick to avoid sleeping on module
1116 * load, since we only need the quirk on reset_controller. Notice
1117 * that the HGST device needs this delay only in firmware activation
1118 * procedure; unfortunately we have no (easy) way to verify this.
1119 */
1120 if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
1121 msleep(NVME_QUIRK_DELAY_AMOUNT);
1122
1123 return nvme_wait_ready(ctrl, cap, false);
1124}
1125EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
1126
1127int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1128{
1129 /*
1130 * Default to a 4K page size, with the intention to update this
1131 * path in the future to accommodate architectures with differing
1132 * kernel and IO page sizes.
1133 */
1134 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
1135 int ret;
1136
1137 if (page_shift < dev_page_min) {
1138 dev_err(ctrl->device,
1139 "Minimum device page size %u too large for host (%u)\n",
1140 1 << dev_page_min, 1 << page_shift);
1141 return -ENODEV;
1142 }
1143
1144 ctrl->page_size = 1 << page_shift;
1145
1146 ctrl->ctrl_config = NVME_CC_CSS_NVM;
1147 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
1148 ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1149 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1150 ctrl->ctrl_config |= NVME_CC_ENABLE;
1151
1152 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1153 if (ret)
1154 return ret;
1155 return nvme_wait_ready(ctrl, cap, true);
1156}
1157EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
1158
1159int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
1160{
1161 unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
1162 u32 csts;
1163 int ret;
1164
1165 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1166 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
1167
1168 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1169 if (ret)
1170 return ret;
1171
1172 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1173 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
1174 break;
1175
1176 msleep(100);
1177 if (fatal_signal_pending(current))
1178 return -EINTR;
1179 if (time_after(jiffies, timeout)) {
1180 dev_err(ctrl->device,
1181 "Device shutdown incomplete; abort shutdown\n");
1182 return -ENODEV;
1183 }
1184 }
1185
1186 return ret;
1187}
1188EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
1189
1190static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1191 struct request_queue *q)
1192{
1193 bool vwc = false;
1194
1195 if (ctrl->max_hw_sectors) {
1196 u32 max_segments =
1197 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
1198
1199 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1200 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1201 }
1202 if (ctrl->stripe_size)
1203 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
1204 blk_queue_virt_boundary(q, ctrl->page_size - 1);
1205 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1206 vwc = true;
1207 blk_queue_write_cache(q, vwc, vwc);
1208}
1209
1210/*
1211 * Initialize the cached copies of the Identify data and various controller
1212 * register in our nvme_ctrl structure. This should be called as soon as
1213 * the admin queue is fully up and running.
1214 */
1215int nvme_init_identify(struct nvme_ctrl *ctrl)
1216{
1217 struct nvme_id_ctrl *id;
1218 u64 cap;
1219 int ret, page_shift;
1220 u32 max_hw_sectors;
1221
1222 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
1223 if (ret) {
1224 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
1225 return ret;
1226 }
1227
1228 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
1229 if (ret) {
1230 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
1231 return ret;
1232 }
1233 page_shift = NVME_CAP_MPSMIN(cap) + 12;
1234
1235 if (ctrl->vs >= NVME_VS(1, 1))
1236 ctrl->subsystem = NVME_CAP_NSSRC(cap);
1237
1238 ret = nvme_identify_ctrl(ctrl, &id);
1239 if (ret) {
1240 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
1241 return -EIO;
1242 }
1243
1244 ctrl->vid = le16_to_cpu(id->vid);
1245 ctrl->oncs = le16_to_cpup(&id->oncs);
1246 atomic_set(&ctrl->abort_limit, id->acl + 1);
1247 ctrl->vwc = id->vwc;
1248 ctrl->cntlid = le16_to_cpup(&id->cntlid);
1249 memcpy(ctrl->serial, id->sn, sizeof(id->sn));
1250 memcpy(ctrl->model, id->mn, sizeof(id->mn));
1251 memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
1252 if (id->mdts)
1253 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
1254 else
1255 max_hw_sectors = UINT_MAX;
1256 ctrl->max_hw_sectors =
1257 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
1258
1259 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
1260 unsigned int max_hw_sectors;
1261
1262 ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
1263 max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
1264 if (ctrl->max_hw_sectors) {
1265 ctrl->max_hw_sectors = min(max_hw_sectors,
1266 ctrl->max_hw_sectors);
1267 } else {
1268 ctrl->max_hw_sectors = max_hw_sectors;
1269 }
1270 }
1271
1272 nvme_set_queue_limits(ctrl, ctrl->admin_q);
1273 ctrl->sgls = le32_to_cpu(id->sgls);
1274 ctrl->kas = le16_to_cpu(id->kas);
1275
1276 if (ctrl->ops->is_fabrics) {
1277 ctrl->icdoff = le16_to_cpu(id->icdoff);
1278 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
1279 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
1280 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
1281
1282 /*
1283 * In fabrics we need to verify the cntlid matches the
1284 * admin connect
1285 */
1286 if (ctrl->cntlid != le16_to_cpu(id->cntlid))
1287 ret = -EINVAL;
1288
1289 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
1290 dev_err(ctrl->dev,
1291 "keep-alive support is mandatory for fabrics\n");
1292 ret = -EINVAL;
1293 }
1294 } else {
1295 ctrl->cntlid = le16_to_cpu(id->cntlid);
1296 }
1297
1298 kfree(id);
1299 return ret;
1300}
1301EXPORT_SYMBOL_GPL(nvme_init_identify);
1302
1303static int nvme_dev_open(struct inode *inode, struct file *file)
1304{
1305 struct nvme_ctrl *ctrl;
1306 int instance = iminor(inode);
1307 int ret = -ENODEV;
1308
1309 spin_lock(&dev_list_lock);
1310 list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
1311 if (ctrl->instance != instance)
1312 continue;
1313
1314 if (!ctrl->admin_q) {
1315 ret = -EWOULDBLOCK;
1316 break;
1317 }
1318 if (!kref_get_unless_zero(&ctrl->kref))
1319 break;
1320 file->private_data = ctrl;
1321 ret = 0;
1322 break;
1323 }
1324 spin_unlock(&dev_list_lock);
1325
1326 return ret;
1327}
1328
1329static int nvme_dev_release(struct inode *inode, struct file *file)
1330{
1331 nvme_put_ctrl(file->private_data);
1332 return 0;
1333}
1334
1335static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
1336{
1337 struct nvme_ns *ns;
1338 int ret;
1339
1340 mutex_lock(&ctrl->namespaces_mutex);
1341 if (list_empty(&ctrl->namespaces)) {
1342 ret = -ENOTTY;
1343 goto out_unlock;
1344 }
1345
1346 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
1347 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
1348 dev_warn(ctrl->device,
1349 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
1350 ret = -EINVAL;
1351 goto out_unlock;
1352 }
1353
1354 dev_warn(ctrl->device,
1355 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
1356 kref_get(&ns->kref);
1357 mutex_unlock(&ctrl->namespaces_mutex);
1358
1359 ret = nvme_user_cmd(ctrl, ns, argp);
1360 nvme_put_ns(ns);
1361 return ret;
1362
1363out_unlock:
1364 mutex_unlock(&ctrl->namespaces_mutex);
1365 return ret;
1366}
1367
1368static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
1369 unsigned long arg)
1370{
1371 struct nvme_ctrl *ctrl = file->private_data;
1372 void __user *argp = (void __user *)arg;
1373
1374 switch (cmd) {
1375 case NVME_IOCTL_ADMIN_CMD:
1376 return nvme_user_cmd(ctrl, NULL, argp);
1377 case NVME_IOCTL_IO_CMD:
1378 return nvme_dev_user_cmd(ctrl, argp);
1379 case NVME_IOCTL_RESET:
1380 dev_warn(ctrl->device, "resetting controller\n");
1381 return ctrl->ops->reset_ctrl(ctrl);
1382 case NVME_IOCTL_SUBSYS_RESET:
1383 return nvme_reset_subsystem(ctrl);
1384 case NVME_IOCTL_RESCAN:
1385 nvme_queue_scan(ctrl);
1386 return 0;
1387 default:
1388 return -ENOTTY;
1389 }
1390}
1391
1392static const struct file_operations nvme_dev_fops = {
1393 .owner = THIS_MODULE,
1394 .open = nvme_dev_open,
1395 .release = nvme_dev_release,
1396 .unlocked_ioctl = nvme_dev_ioctl,
1397 .compat_ioctl = nvme_dev_ioctl,
1398};
1399
1400static ssize_t nvme_sysfs_reset(struct device *dev,
1401 struct device_attribute *attr, const char *buf,
1402 size_t count)
1403{
1404 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1405 int ret;
1406
1407 ret = ctrl->ops->reset_ctrl(ctrl);
1408 if (ret < 0)
1409 return ret;
1410 return count;
1411}
1412static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
1413
1414static ssize_t nvme_sysfs_rescan(struct device *dev,
1415 struct device_attribute *attr, const char *buf,
1416 size_t count)
1417{
1418 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1419
1420 nvme_queue_scan(ctrl);
1421 return count;
1422}
1423static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
1424
1425static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
1426 char *buf)
1427{
1428 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1429 struct nvme_ctrl *ctrl = ns->ctrl;
1430 int serial_len = sizeof(ctrl->serial);
1431 int model_len = sizeof(ctrl->model);
1432
1433 if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1434 return sprintf(buf, "eui.%16phN\n", ns->uuid);
1435
1436 if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1437 return sprintf(buf, "eui.%8phN\n", ns->eui);
1438
1439 while (ctrl->serial[serial_len - 1] == ' ')
1440 serial_len--;
1441 while (ctrl->model[model_len - 1] == ' ')
1442 model_len--;
1443
1444 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
1445 serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
1446}
1447static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
1448
1449static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
1450 char *buf)
1451{
1452 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1453 return sprintf(buf, "%pU\n", ns->uuid);
1454}
1455static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
1456
1457static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
1458 char *buf)
1459{
1460 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1461 return sprintf(buf, "%8phd\n", ns->eui);
1462}
1463static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
1464
1465static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
1466 char *buf)
1467{
1468 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1469 return sprintf(buf, "%d\n", ns->ns_id);
1470}
1471static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
1472
1473static struct attribute *nvme_ns_attrs[] = {
1474 &dev_attr_wwid.attr,
1475 &dev_attr_uuid.attr,
1476 &dev_attr_eui.attr,
1477 &dev_attr_nsid.attr,
1478 NULL,
1479};
1480
1481static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
1482 struct attribute *a, int n)
1483{
1484 struct device *dev = container_of(kobj, struct device, kobj);
1485 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1486
1487 if (a == &dev_attr_uuid.attr) {
1488 if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1489 return 0;
1490 }
1491 if (a == &dev_attr_eui.attr) {
1492 if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1493 return 0;
1494 }
1495 return a->mode;
1496}
1497
1498static const struct attribute_group nvme_ns_attr_group = {
1499 .attrs = nvme_ns_attrs,
1500 .is_visible = nvme_ns_attrs_are_visible,
1501};
1502
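/*
 * Helper macros generating read-only sysfs attributes that print a string
 * or integer field straight out of struct nvme_ctrl (model, serial,
 * firmware_rev, cntlid).
 */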
1503#define nvme_show_str_function(field) \
1504static ssize_t field##_show(struct device *dev, \
1505 struct device_attribute *attr, char *buf) \
1506{ \
1507 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
1508 return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
1509} \
1510static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1511
1512#define nvme_show_int_function(field) \
1513static ssize_t field##_show(struct device *dev, \
1514 struct device_attribute *attr, char *buf) \
1515{ \
1516 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
1517 return sprintf(buf, "%d\n", ctrl->field); \
1518} \
1519static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1520
1521nvme_show_str_function(model);
1522nvme_show_str_function(serial);
1523nvme_show_str_function(firmware_rev);
1524nvme_show_int_function(cntlid);
1525
1526static ssize_t nvme_sysfs_delete(struct device *dev,
1527 struct device_attribute *attr, const char *buf,
1528 size_t count)
1529{
1530 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1531
1532 if (device_remove_file_self(dev, attr))
1533 ctrl->ops->delete_ctrl(ctrl);
1534 return count;
1535}
1536static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
1537
1538static ssize_t nvme_sysfs_show_transport(struct device *dev,
1539 struct device_attribute *attr,
1540 char *buf)
1541{
1542 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1543
1544 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
1545}
1546static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
1547
1548static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
1549 struct device_attribute *attr,
1550 char *buf)
1551{
1552 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1553
1554 return snprintf(buf, PAGE_SIZE, "%s\n",
1555 ctrl->ops->get_subsysnqn(ctrl));
1556}
1557static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
1558
1559static ssize_t nvme_sysfs_show_address(struct device *dev,
1560 struct device_attribute *attr,
1561 char *buf)
1562{
1563 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1564
1565 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
1566}
1567static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
1568
1569static struct attribute *nvme_dev_attrs[] = {
1570 &dev_attr_reset_controller.attr,
1571 &dev_attr_rescan_controller.attr,
1572 &dev_attr_model.attr,
1573 &dev_attr_serial.attr,
1574 &dev_attr_firmware_rev.attr,
1575 &dev_attr_cntlid.attr,
1576 &dev_attr_delete_controller.attr,
1577 &dev_attr_transport.attr,
1578 &dev_attr_subsysnqn.attr,
1579 &dev_attr_address.attr,
1580 NULL
1581};
1582
1583#define CHECK_ATTR(ctrl, a, name) \
1584 if ((a) == &dev_attr_##name.attr && \
1585 !(ctrl)->ops->get_##name) \
1586 return 0
1587
1588static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
1589 struct attribute *a, int n)
1590{
1591 struct device *dev = container_of(kobj, struct device, kobj);
1592 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1593
1594 if (a == &dev_attr_delete_controller.attr) {
1595 if (!ctrl->ops->delete_ctrl)
1596 return 0;
1597 }
1598
1599 CHECK_ATTR(ctrl, a, subsysnqn);
1600 CHECK_ATTR(ctrl, a, address);
1601
1602 return a->mode;
1603}
1604
1605static struct attribute_group nvme_dev_attrs_group = {
1606 .attrs = nvme_dev_attrs,
1607 .is_visible = nvme_dev_attrs_are_visible,
1608};
1609
1610static const struct attribute_group *nvme_dev_attr_groups[] = {
1611 &nvme_dev_attrs_group,
1612 NULL,
1613};
1614
1615static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
1616{
1617 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
1618 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
1619
1620 return nsa->ns_id - nsb->ns_id;
1621}
1622
1623static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1624{
1625 struct nvme_ns *ns, *ret = NULL;
1626
1627 mutex_lock(&ctrl->namespaces_mutex);
1628 list_for_each_entry(ns, &ctrl->namespaces, list) {
1629 if (ns->ns_id == nsid) {
1630 kref_get(&ns->kref);
1631 ret = ns;
1632 break;
1633 }
1634 if (ns->ns_id > nsid)
1635 break;
1636 }
1637 mutex_unlock(&ctrl->namespaces_mutex);
1638 return ret;
1639}
1640
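/*
 * Allocate and register a new namespace: set up the blk-mq queue, read the
 * Identify Namespace data, then either register with LightNVM (when the
 * namespace supports it) or create the gendisk and add it together with
 * its sysfs identification attributes.
 */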
1641static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1642{
1643 struct nvme_ns *ns;
1644 struct gendisk *disk;
1645 struct nvme_id_ns *id;
1646 char disk_name[DISK_NAME_LEN];
1647 int node = dev_to_node(ctrl->dev);
1648
1649 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1650 if (!ns)
1651 return;
1652
1653 ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1654 if (ns->instance < 0)
1655 goto out_free_ns;
1656
1657 ns->queue = blk_mq_init_queue(ctrl->tagset);
1658 if (IS_ERR(ns->queue))
1659 goto out_release_instance;
1660 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1661 ns->queue->queuedata = ns;
1662 ns->ctrl = ctrl;
1663
1664 kref_init(&ns->kref);
1665 ns->ns_id = nsid;
1666 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
1667
1668 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1669 nvme_set_queue_limits(ctrl, ns->queue);
1670
1671 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1672
1673 if (nvme_revalidate_ns(ns, &id))
1674 goto out_free_queue;
1675
1676 if (nvme_nvm_ns_supported(ns, id)) {
1677 if (nvme_nvm_register(ns, disk_name, node,
1678 &nvme_ns_attr_group)) {
1679 dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
1680 __func__);
1681 goto out_free_id;
1682 }
1683 } else {
1684 disk = alloc_disk_node(0, node);
1685 if (!disk)
1686 goto out_free_id;
1687
1688 disk->fops = &nvme_fops;
1689 disk->private_data = ns;
1690 disk->queue = ns->queue;
1691 disk->flags = GENHD_FL_EXT_DEVT;
1692 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
1693 ns->disk = disk;
1694
1695 __nvme_revalidate_disk(disk, id);
1696 }
1697
1698 mutex_lock(&ctrl->namespaces_mutex);
1699 list_add_tail(&ns->list, &ctrl->namespaces);
1700 mutex_unlock(&ctrl->namespaces_mutex);
1701
1702 kref_get(&ctrl->kref);
1703
1704 kfree(id);
1705
1706 if (ns->ndev)
1707 return;
1708
1709 device_add_disk(ctrl->device, ns->disk);
1710 if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
1711 &nvme_ns_attr_group))
1712 pr_warn("%s: failed to create sysfs group for identification\n",
1713 ns->disk->disk_name);
1714 return;
1715 out_free_id:
1716 kfree(id);
1717 out_free_queue:
1718 blk_cleanup_queue(ns->queue);
1719 out_release_instance:
1720 ida_simple_remove(&ctrl->ns_ida, ns->instance);
1721 out_free_ns:
1722 kfree(ns);
1723}
1724
1725static void nvme_ns_remove(struct nvme_ns *ns)
1726{
1727 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1728 return;
1729
1730 if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
1731 if (blk_get_integrity(ns->disk))
1732 blk_integrity_unregister(ns->disk);
1733 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1734 &nvme_ns_attr_group);
1735 del_gendisk(ns->disk);
1736 blk_mq_abort_requeue_list(ns->queue);
1737 blk_cleanup_queue(ns->queue);
1738 }
1739
1740 mutex_lock(&ns->ctrl->namespaces_mutex);
1741 list_del_init(&ns->list);
1742 mutex_unlock(&ns->ctrl->namespaces_mutex);
1743
1744 nvme_put_ns(ns);
1745}
1746
1747static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1748{
1749 struct nvme_ns *ns;
1750
1751 ns = nvme_find_get_ns(ctrl, nsid);
1752 if (ns) {
1753 if (ns->disk && revalidate_disk(ns->disk))
1754 nvme_ns_remove(ns);
1755 nvme_put_ns(ns);
1756 } else
1757 nvme_alloc_ns(ctrl, nsid);
1758}
1759
1760static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
1761 unsigned nsid)
1762{
1763 struct nvme_ns *ns, *next;
1764
1765 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
1766 if (ns->ns_id > nsid)
1767 nvme_ns_remove(ns);
1768 }
1769}
1770
1771static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
1772{
1773 struct nvme_ns *ns;
1774 __le32 *ns_list;
1775 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
1776 int ret = 0;
1777
1778 ns_list = kzalloc(0x1000, GFP_KERNEL);
1779 if (!ns_list)
1780 return -ENOMEM;
1781
1782 for (i = 0; i < num_lists; i++) {
1783 ret = nvme_identify_ns_list(ctrl, prev, ns_list);
1784 if (ret)
1785 goto free;
1786
1787 for (j = 0; j < min(nn, 1024U); j++) {
1788 nsid = le32_to_cpu(ns_list[j]);
1789 if (!nsid)
1790 goto out;
1791
1792 nvme_validate_ns(ctrl, nsid);
1793
1794 while (++prev < nsid) {
1795 ns = nvme_find_get_ns(ctrl, prev);
1796 if (ns) {
1797 nvme_ns_remove(ns);
1798 nvme_put_ns(ns);
1799 }
1800 }
1801 }
1802 nn -= j;
1803 }
1804 out:
1805 nvme_remove_invalid_namespaces(ctrl, prev);
1806 free:
1807 kfree(ns_list);
1808 return ret;
1809}
1810
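/*
 * Namespace scanning: controllers reporting NVMe 1.1 or later are scanned
 * through the Identify namespace ID list in nvme_scan_ns_list(); older
 * controllers, and those with the NVME_QUIRK_IDENTIFY_CNS quirk, fall back
 * to probing every namespace ID from 1 to NN sequentially.
 */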
1811static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
1812{
1813 unsigned i;
1814
1815 for (i = 1; i <= nn; i++)
1816 nvme_validate_ns(ctrl, i);
1817
1818 nvme_remove_invalid_namespaces(ctrl, nn);
1819}
1820
1821static void nvme_scan_work(struct work_struct *work)
1822{
1823 struct nvme_ctrl *ctrl =
1824 container_of(work, struct nvme_ctrl, scan_work);
1825 struct nvme_id_ctrl *id;
1826 unsigned nn;
1827
1828 if (ctrl->state != NVME_CTRL_LIVE)
1829 return;
1830
1831 if (nvme_identify_ctrl(ctrl, &id))
1832 return;
1833
1834 nn = le32_to_cpu(id->nn);
1835 if (ctrl->vs >= NVME_VS(1, 1) &&
1836 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1837 if (!nvme_scan_ns_list(ctrl, nn))
1838 goto done;
1839 }
1840 nvme_scan_ns_sequential(ctrl, nn);
1841 done:
1842 mutex_lock(&ctrl->namespaces_mutex);
1843 list_sort(NULL, &ctrl->namespaces, ns_cmp);
1844 mutex_unlock(&ctrl->namespaces_mutex);
1845 kfree(id);
1846}
1847
1848void nvme_queue_scan(struct nvme_ctrl *ctrl)
1849{
1850 /*
1851 * Do not queue new scan work when a controller is reset during
1852 * removal.
1853 */
1854 if (ctrl->state == NVME_CTRL_LIVE)
1855 schedule_work(&ctrl->scan_work);
1856}
1857EXPORT_SYMBOL_GPL(nvme_queue_scan);
1858
1859/*
1860 * This function iterates the namespace list unlocked to allow recovery from
1861 * controller failure. It is up to the caller to ensure the namespace list is
1862 * not modified by scan work while this function is executing.
1863 */
5bae7f73
CH
1864void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1865{
1866 struct nvme_ns *ns, *next;
1867
0ff9d4e1
KB
1868 /*
1869	 * The dead state indicates the controller was not gracefully
1870 * disconnected. In that case, we won't be able to flush any data while
1871 * removing the namespaces' disks; fail all the queues now to avoid
1872 * potentially having to clean up the failed sync later.
1873 */
1874 if (ctrl->state == NVME_CTRL_DEAD)
1875 nvme_kill_queues(ctrl);
1876
5bae7f73
CH
1877 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1878 nvme_ns_remove(ns);
1879}
576d55d6 1880EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
5bae7f73 1881
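/*
 * Worker that submits Asynchronous Event Request commands until the
 * controller's event_limit is used up; the controller lock is dropped
 * around each submit_async_event() call.
 */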
f866fc42
CH
1882static void nvme_async_event_work(struct work_struct *work)
1883{
1884 struct nvme_ctrl *ctrl =
1885 container_of(work, struct nvme_ctrl, async_event_work);
1886
1887 spin_lock_irq(&ctrl->lock);
1888 while (ctrl->event_limit > 0) {
1889 int aer_idx = --ctrl->event_limit;
1890
1891 spin_unlock_irq(&ctrl->lock);
1892 ctrl->ops->submit_async_event(ctrl, aer_idx);
1893 spin_lock_irq(&ctrl->lock);
1894 }
1895 spin_unlock_irq(&ctrl->lock);
1896}
1897
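/*
 * Handle an AER completion: re-arm another event request when the command
 * completed successfully or was aborted, and act on the result only on
 * success.  A "namespace changed" notice triggers a rescan.
 */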
1898void nvme_complete_async_event(struct nvme_ctrl *ctrl,
1899 struct nvme_completion *cqe)
1900{
1901 u16 status = le16_to_cpu(cqe->status) >> 1;
d49187e9 1902 u32 result = le32_to_cpu(cqe->result.u32);
f866fc42
CH
1903
1904 if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
1905 ++ctrl->event_limit;
1906 schedule_work(&ctrl->async_event_work);
1907 }
1908
1909 if (status != NVME_SC_SUCCESS)
1910 return;
1911
1912 switch (result & 0xff07) {
1913 case NVME_AER_NOTICE_NS_CHANGED:
1914 dev_info(ctrl->device, "rescanning\n");
1915 nvme_queue_scan(ctrl);
1916 break;
1917 default:
1918 dev_warn(ctrl->device, "async event result %08x\n", result);
1919 }
1920}
1921EXPORT_SYMBOL_GPL(nvme_complete_async_event);
1922
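/* Arm the full set of NVME_NR_AERS asynchronous event requests. */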
1923void nvme_queue_async_events(struct nvme_ctrl *ctrl)
1924{
1925 ctrl->event_limit = NVME_NR_AERS;
1926 schedule_work(&ctrl->async_event_work);
1927}
1928EXPORT_SYMBOL_GPL(nvme_queue_async_events);
1929
f3ca80fc
CH
1930static DEFINE_IDA(nvme_instance_ida);
1931
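/*
 * Allocate a unique instance number for the controller from the global IDA,
 * retrying while the IDA needs to preallocate memory; the instance names the
 * controller's character device ("nvme%d").
 */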
1932static int nvme_set_instance(struct nvme_ctrl *ctrl)
1933{
1934 int instance, error;
1935
1936 do {
1937 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
1938 return -ENODEV;
1939
1940 spin_lock(&dev_list_lock);
1941 error = ida_get_new(&nvme_instance_ida, &instance);
1942 spin_unlock(&dev_list_lock);
1943 } while (error == -EAGAIN);
1944
1945 if (error)
1946 return -ENODEV;
1947
1948 ctrl->instance = instance;
1949 return 0;
1950}
1951
1952static void nvme_release_instance(struct nvme_ctrl *ctrl)
1953{
1954 spin_lock(&dev_list_lock);
1955 ida_remove(&nvme_instance_ida, ctrl->instance);
1956 spin_unlock(&dev_list_lock);
1957}
1958
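/*
 * Tear down the user-visible parts of a controller: flush outstanding AER
 * and scan work, remove all namespaces, destroy the character device and
 * take the controller off the global controller list.
 */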
53029b04 1959void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
576d55d6 1960{
f866fc42 1961 flush_work(&ctrl->async_event_work);
5955be21
CH
1962 flush_work(&ctrl->scan_work);
1963 nvme_remove_namespaces(ctrl);
1964
53029b04 1965 device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
f3ca80fc
CH
1966
1967 spin_lock(&dev_list_lock);
1968 list_del(&ctrl->node);
1969 spin_unlock(&dev_list_lock);
53029b04 1970}
576d55d6 1971EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
53029b04
KB
1972
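/*
 * Final kref release: drop the device reference, return the instance number
 * and namespace IDA, then let the transport free its controller structure.
 */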
1973static void nvme_free_ctrl(struct kref *kref)
1974{
1975 struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
f3ca80fc
CH
1976
1977 put_device(ctrl->device);
1978 nvme_release_instance(ctrl);
075790eb 1979 ida_destroy(&ctrl->ns_ida);
f3ca80fc
CH
1980
1981 ctrl->ops->free_ctrl(ctrl);
1982}
1983
1984void nvme_put_ctrl(struct nvme_ctrl *ctrl)
1985{
1986 kref_put(&ctrl->kref, nvme_free_ctrl);
1987}
576d55d6 1988EXPORT_SYMBOL_GPL(nvme_put_ctrl);
f3ca80fc
CH
1989
1990/*
1991 * Initialize an NVMe controller structure. This needs to be called during
1992 * the earliest initialization so that we have the initialized structure
1993 * around during probing.
1994 */
1995int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1996 const struct nvme_ctrl_ops *ops, unsigned long quirks)
1997{
1998 int ret;
1999
bb8d261e
CH
2000 ctrl->state = NVME_CTRL_NEW;
2001 spin_lock_init(&ctrl->lock);
f3ca80fc 2002 INIT_LIST_HEAD(&ctrl->namespaces);
69d3b8ac 2003 mutex_init(&ctrl->namespaces_mutex);
f3ca80fc
CH
2004 kref_init(&ctrl->kref);
2005 ctrl->dev = dev;
2006 ctrl->ops = ops;
2007 ctrl->quirks = quirks;
5955be21 2008 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
f866fc42 2009 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
f3ca80fc
CH
2010
2011 ret = nvme_set_instance(ctrl);
2012 if (ret)
2013 goto out;
2014
779ff756 2015 ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
f3ca80fc 2016 MKDEV(nvme_char_major, ctrl->instance),
f4f0f63e 2017 ctrl, nvme_dev_attr_groups,
779ff756 2018 "nvme%d", ctrl->instance);
f3ca80fc
CH
2019 if (IS_ERR(ctrl->device)) {
2020 ret = PTR_ERR(ctrl->device);
2021 goto out_release_instance;
2022 }
2023 get_device(ctrl->device);
075790eb 2024 ida_init(&ctrl->ns_ida);
f3ca80fc 2025
f3ca80fc
CH
2026 spin_lock(&dev_list_lock);
2027 list_add_tail(&ctrl->node, &nvme_ctrl_list);
2028 spin_unlock(&dev_list_lock);
2029
2030 return 0;
f3ca80fc
CH
2031out_release_instance:
2032 nvme_release_instance(ctrl);
2033out:
2034 return ret;
2035}
576d55d6 2036EXPORT_SYMBOL_GPL(nvme_init_ctrl);
f3ca80fc 2037
69d9a99c
KB
2038/**
2039 * nvme_kill_queues - Ends all namespace queues
2040 * @ctrl: the dead controller whose namespace queues need to be ended
2041 *
2042 * Call this function when the driver determines it is unable to get the
2043 * controller in a state capable of servicing IO.
2044 */
2045void nvme_kill_queues(struct nvme_ctrl *ctrl)
2046{
2047 struct nvme_ns *ns;
2048
32f0c4af
KB
2049 mutex_lock(&ctrl->namespaces_mutex);
2050 list_for_each_entry(ns, &ctrl->namespaces, list) {
69d9a99c
KB
2051 /*
2052		 * Revalidating a dead namespace sets its capacity to 0. This
2053		 * stops buffered writers from dirtying pages that can't be synced.
2054 */
b0b4e09c 2055 if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
69d9a99c
KB
2056 revalidate_disk(ns->disk);
2057
2058 blk_set_queue_dying(ns->queue);
2059 blk_mq_abort_requeue_list(ns->queue);
2060 blk_mq_start_stopped_hw_queues(ns->queue, true);
69d9a99c 2061 }
32f0c4af 2062 mutex_unlock(&ctrl->namespaces_mutex);
69d9a99c 2063}
237045fc 2064EXPORT_SYMBOL_GPL(nvme_kill_queues);
69d9a99c 2065
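/*
 * Quiesce every namespace request queue so no new requests are dispatched
 * to the driver (typically around a controller reset or shutdown).
 */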
25646264 2066void nvme_stop_queues(struct nvme_ctrl *ctrl)
363c9aac
SG
2067{
2068 struct nvme_ns *ns;
2069
32f0c4af 2070 mutex_lock(&ctrl->namespaces_mutex);
a6eaa884 2071 list_for_each_entry(ns, &ctrl->namespaces, list)
3174dd33 2072 blk_mq_quiesce_queue(ns->queue);
32f0c4af 2073 mutex_unlock(&ctrl->namespaces_mutex);
363c9aac 2074}
576d55d6 2075EXPORT_SYMBOL_GPL(nvme_stop_queues);
363c9aac 2076
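/*
 * Restart any stopped namespace hardware queues and kick the requeue lists
 * so held requests are dispatched again once the controller is usable.
 */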
25646264 2077void nvme_start_queues(struct nvme_ctrl *ctrl)
363c9aac
SG
2078{
2079 struct nvme_ns *ns;
2080
32f0c4af
KB
2081 mutex_lock(&ctrl->namespaces_mutex);
2082 list_for_each_entry(ns, &ctrl->namespaces, list) {
363c9aac
SG
2083 blk_mq_start_stopped_hw_queues(ns->queue, true);
2084 blk_mq_kick_requeue_list(ns->queue);
2085 }
32f0c4af 2086 mutex_unlock(&ctrl->namespaces_mutex);
363c9aac 2087}
576d55d6 2088EXPORT_SYMBOL_GPL(nvme_start_queues);
363c9aac 2089
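/*
 * Module init: register the nvme character device region (a dynamic major
 * is picked up when nvme_char_major is 0) and create the "nvme" class that
 * the per-controller character devices hang off.
 */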
5bae7f73
CH
2090int __init nvme_core_init(void)
2091{
2092 int result;
2093
f3ca80fc
CH
2094 result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
2095 &nvme_dev_fops);
2096 if (result < 0)
b09dcf58 2097 return result;
f3ca80fc
CH
2098 else if (result > 0)
2099 nvme_char_major = result;
2100
2101 nvme_class = class_create(THIS_MODULE, "nvme");
2102 if (IS_ERR(nvme_class)) {
2103 result = PTR_ERR(nvme_class);
2104 goto unregister_chrdev;
2105 }
2106
5bae7f73 2107 return 0;
f3ca80fc
CH
2108
2109 unregister_chrdev:
2110 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
f3ca80fc 2111 return result;
5bae7f73
CH
2112}
2113
2114void nvme_core_exit(void)
2115{
f3ca80fc
CH
2116 class_destroy(nvme_class);
2117 __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
5bae7f73 2118}
576d55d6
ML
2119
2120MODULE_LICENSE("GPL");
2121MODULE_VERSION("1.0");
2122module_init(nvme_core_init);
2123module_exit(nvme_core_exit);