/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
static int nvme_major;
module_param(nvme_major, int, 0);

DEFINE_SPINLOCK(dev_list_lock);
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	nvme_put_ctrl(ns->ctrl);
	put_disk(ns->disk);
	kfree(ns);
}
static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns && !kref_get_unless_zero(&ns->kref))
		ns = NULL;
	spin_unlock(&dev_list_lock);

	return ns;
}
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	/* NVMe opcodes encode the data direction in bit 0: odd opcodes
	 * transfer data from host to controller (writes). */
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);
	req->special = (void *)0;

	return req;
}
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
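/*
 * Illustrative sketch, not part of the original file: how a caller can
 * tell the two result domains apart.  "q", "cmd", "buf", "len" and the
 * dev_warn() target are assumed placeholders.
 *
 *	ret = nvme_submit_sync_cmd(q, &cmd, buf, len);
 *	if (ret < 0)
 *		return ret;			// Linux errno, e.g. -ENOMEM
 *	if (ret > 0)
 *		dev_warn(dev, "NVMe status 0x%x\n", ret);
 */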
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
}
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = (u32)(uintptr_t)req->special;
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/*
	 * gcc-4.4.4 (at least) has issues with initializers and anon unions,
	 * so fill in the command with explicit assignments instead of a
	 * designated initializer.
	 */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
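/*
 * Sketch of a typical use, mirroring how kernel code sizes I/O queues
 * elsewhere (an assumption, not code from this file): the Number of
 * Queues feature takes 0-based counts in both halves of dword11.
 *
 *	u32 result, q = count - 1;
 *	int status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES,
 *				       q | (q << 16), 0, &result);
 */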
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
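/*
 * For reference: cdw10 of Get Log Page packs NUMD (the 0-based number of
 * dwords to transfer) into bits 27:16 and the log page identifier into
 * bits 7:0; the encoding above requests the SMART / health log for the
 * global namespace ID 0xFFFFFFFF.
 */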
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
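/*
 * Illustrative userspace counterpart (an assumption): issuing Identify
 * Controller through the admin passthru ioctl, matching the opcode and
 * CNS value used by nvme_identify_ctrl() above.
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode   = 0x06,			// nvme_admin_identify
 *		.addr     = (__u64)(uintptr_t)id_buf,	// 4096-byte buffer
 *		.data_len = 4096,
 *		.cdw10    = 1,				// CNS=1: identify controller
 *	};
 *	int err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */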
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns(disk->private_data);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values: 64 heads * 32 sectors * 512-byte sectors
	 * makes each fake cylinder 1 MiB, hence the capacity >> 11 */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
				__func__, ns->ctrl->instance, ns->ns_id);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(ns->ctrl->dev,
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If the reported LBA data size is zero, fall back to a default
	 * 512 byte block size so the block layer has something usable
	 * before read/write fail on the zero-capacity disk.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;

	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !ns->ext)
		nvme_init_integrity(ns);

	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}
static char nvme_pr_type(enum pr_type type)
{
	/* Map the block layer pr_type values to NVMe reservation types. */
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
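/*
 * For reference (NVMe 1.2, reservation commands): the callers below build
 * cdw10 with the reservation type in bits 15:8, IEKEY (ignore existing
 * key) in bit 3 and the register/acquire/release action in bits 2:0; the
 * 16-byte payload carries the current key and, where used, the new or
 * service action key.
 */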
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;	/* 2 = replace existing key, 0 = register */
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	/*
	 * The parentheses around the conditional matter: ?: binds more
	 * loosely than |, so without them the expression would evaluate
	 * as "(type << 8 | abort) ? 2 : 1".
	 */
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	/* Parenthesized for the same ?: precedence reason as above. */
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	/* CAP.TO is expressed in 500 millisecond units */
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->dev,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, false);
}
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->dev,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
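/*
 * Worked example of the value programmed above: with page_shift == 12 the
 * controller configuration ends up with MPS = 0 (4K pages), CSS = 0 (NVM
 * command set), AMS = 0 (round-robin arbitration) and IOSQES = 6 /
 * IOCQES = 4, i.e. 64-byte submission and 16-byte completion queue
 * entries, before CC.EN is finally set.
 */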
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->dev,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->oncs = le16_to_cpup(&id->oncs);
	ctrl->abort_limit = id->acl + 1;
	ctrl->vwc = id->vwc;
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	/* MDTS is a power of two of the minimum memory page size */
	if (id->mdts)
		ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		ctrl->max_hw_sectors = UINT_MAX;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	kfree(id);
	return 0;
}
*kref
)
838 struct nvme_ctrl
*ctrl
= container_of(kref
, struct nvme_ctrl
, kref
);
840 ctrl
->ops
->free_ctrl(ctrl
);
843 void nvme_put_ctrl(struct nvme_ctrl
*ctrl
)
845 kref_put(&ctrl
->kref
, nvme_free_ctrl
);
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}
static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	/* The namespace list is kept sorted by nsid (see ns_cmp above),
	 * so the walk can stop at the first entry past the target. */
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid)
			return ns;
		if (ns->ns_id > nsid)
			break;
	}
	return NULL;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_free_ns;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
	list_add_tail(&ns->list, &ctrl->namespaces);

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (ctrl->max_hw_sectors) {
		blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
		blk_queue_max_segments(ns->queue,
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
	blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = ctrl->device;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);

	/*
	 * Initialize capacity to 0 until we establish the namespace format and
	 * setup integrity extensions if necessary.  The revalidate_disk after
	 * add_disk allows the driver to register with integrity if the format
	 * requires it.
	 */
	set_capacity(disk, 0);
	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	kref_get(&ctrl->kref);
	if (ns->type != NVME_NS_LIGHTNVM) {
		add_disk(ns->disk);
		if (ns->ms) {
			struct block_device *bd = bdget_disk(ns->disk, 0);
			if (!bd)
				return;
			if (blkdev_get(bd, FMODE_READ, NULL)) {
				bdput(bd);
				return;
			}
			blkdev_reread_part(bd);
			blkdev_put(bd, FMODE_READ);
		}
	}

	return;
 out_free_disk:
	kfree(disk);
	list_del(&ns->list);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool kill = nvme_io_incapable(ns->ctrl) &&
			!blk_queue_dying(ns->queue);

	if (kill)
		blk_set_queue_dying(ns->queue);
	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		del_gendisk(ns->disk);
	}
	if (kill || !blk_queue_dying(ns->queue)) {
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}
	list_del_init(&ns->list);
	nvme_put_ns(ns);
}
static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns, *next;
	unsigned i;

	for (i = 1; i <= nn; i++) {
		ns = nvme_find_ns(ctrl, i);
		if (ns) {
			if (revalidate_disk(ns->disk))
				nvme_ns_remove(ns);
		} else
			nvme_alloc_ns(ctrl, i);
	}
	/* Drop namespaces beyond the controller's reported count. */
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nn)
			nvme_ns_remove(ns);
	}
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
}
void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
	kfree(id);
}
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
int __init nvme_core_init(void)
{
	int result;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_major = result;

	return 0;
}

void nvme_core_exit(void)
{
	unregister_blkdev(nvme_major, "nvme");
}