/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#define NVME_MINORS		(1U << MINORBITS)

static int nvme_major;
module_param(nvme_major, int, 0);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	nvme_put_ctrl(ns->ctrl);
	put_disk(ns->disk);
	kfree(ns);
}
static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns && !kref_get_unless_zero(&ns->kref))
		ns = NULL;
	spin_unlock(&dev_list_lock);

	return ns;
}
void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);
	req->special = (void *)0;

	return req;
}
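/*
 * A note on the "opcode & 1" test above: NVMe encodes the transfer
 * direction in the low opcode bits, and opcodes that move data from host
 * to controller have bit 0 set (write = 0x01, read = 0x02).  An odd
 * opcode therefore allocates a WRITE block request, an even one a READ.
 */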
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
}
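/*
 * Example (an illustrative sketch): callers build a command on the stack
 * and let the helpers above map the payload and wait for completion, e.g.
 * to read the 4K Identify Controller structure:
 *
 *	struct nvme_command c = { };
 *	void *buf = kmalloc(0x1000, GFP_KERNEL);
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	if (buf)
 *		error = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, 0x1000);
 *
 * A negative return value is a Linux errno from the block layer; a
 * positive one is the NVMe status code from the completion.
 */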
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = (u32)(uintptr_t)req->special;
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
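/*
 * On success the caller owns the Identify buffer and must free it, as
 * nvme_init_identify() does below.  An illustrative use:
 *
 *	struct nvme_id_ctrl *id;
 *
 *	if (nvme_identify_ctrl(ctrl, &id))
 *		return -EIO;
 *	... use id->nn, id->oncs, id->mdts ...
 *	kfree(id);
 */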
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
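/*
 * CNS value 2 selects the Namespace List: the controller fills the 4K
 * buffer with up to 1024 active namespace IDs (one __le32 each), all
 * greater than the nsid passed in.  This is why nvme_scan_ns_list()
 * below walks the namespace space in chunks of 1024.
 */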
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
				dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
				dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
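/*
 * Example (an illustrative sketch): the feature is selected by fid and
 * its argument passed in cdw11, so enabling the volatile write cache
 * could look like:
 *
 *	u32 result;
 *	int ret;
 *
 *	ret = nvme_set_features(ctrl, NVME_FEAT_VOLATILE_WC, 1, 0, &result);
 */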
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status)
		return status;

	nr_io_queues = min(result & 0xffff, result >> 16) + 1;
	*count = min(*count, nr_io_queues);
	return 0;
}
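/*
 * The Number of Queues feature is zero's based in both directions.  A
 * request for *count = 8 queues encodes q_count = 0x00070007 (completion
 * queues in the upper 16 bits, submission queues in the lower).  If the
 * controller answers with result = 0x000f0007 (8 SQs and 16 CQs granted),
 * nr_io_queues becomes min(7, 15) + 1 = 8.
 */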
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns(disk->private_data);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
				__func__, ns->ctrl->instance, ns->ns_id);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(ns->ctrl->dev,
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
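/*
 * These pr_ops map the block layer persistent reservation API onto NVMe
 * reservation commands.  In each cdw10 built above, bits 2:0 hold the
 * reservation action, bit 3 the IEKEY (ignore existing key) flag, and
 * bits 15:8 the reservation type translated by nvme_pr_type().
 */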
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->dev,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, false);
}
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->dev,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
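/*
 * A worked example of the CC value assembled above: with page_shift = 12
 * the MPS field is 0, selecting 2^(12 + 0) = 4K host pages.  NVME_CC_IOSQES
 * and NVME_CC_IOCQES encode the standard 64-byte submission and 16-byte
 * completion queue entry sizes (2^6 and 2^4), and setting NVME_CC_ENABLE
 * last lets nvme_wait_ready() poll CSTS.RDY for the transition.
 */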
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->dev,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		ctrl->max_hw_sectors = UINT_MAX;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	kfree(id);
	return 0;
}
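/*
 * MDTS is a power of two in units of the minimum device page size.  For
 * example, with page_shift = 12 and id->mdts = 5 the device accepts
 * transfers up to 2^5 * 4K = 128K, and the computation above yields
 * max_hw_sectors = 1 << (5 + 12 - 9) = 256 512-byte sectors.
 */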
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}
static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->dev,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->dev,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->dev, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	default:
		return -ENOTTY;
	}
}
static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};
static umode_t nvme_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}
static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_attrs_are_visible,
};
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}
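/*
 * ns_cmp() is the comparison callback passed to list_sort() by
 * nvme_scan_namespaces() so that ctrl->namespaces stays ordered by
 * namespace ID; nvme_find_ns() relies on that ordering to stop scanning
 * once it has passed the requested nsid.
 */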
static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	lockdep_assert_held(&ctrl->namespaces_mutex);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid)
			return ns;
		if (ns->ns_id > nsid)
			break;
	}
	return NULL;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	lockdep_assert_held(&ctrl->namespaces_mutex);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_free_ns;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (ctrl->max_hw_sectors) {
		blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
		blk_queue_max_segments(ns->queue,
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
	blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = ctrl->device;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	list_add_tail(&ns->list, &ctrl->namespaces);
	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	add_disk(ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool kill = nvme_io_incapable(ns->ctrl) &&
			!blk_queue_dying(ns->queue);

	lockdep_assert_held(&ns->ctrl->namespaces_mutex);

	if (kill)
		blk_set_queue_dying(ns->queue);
	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
	}
	if (kill || !blk_queue_dying(ns->queue)) {
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}
	list_del_init(&ns->list);
	nvme_put_ns(ns);
}
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto out;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_ns(ctrl, prev);
				if (ns)
					nvme_ns_remove(ns);
			}
		}
		nn -= j;
	}
 out:
	kfree(ns_list);
	return ret;
}
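/*
 * A worked example of the scan above: with nn = 2500 namespaces,
 * num_lists = DIV_ROUND_UP(2500, 1024) = 3 Identify calls are issued.
 * A returned page is terminated by a zero nsid, and the while loop reaps
 * any namespace whose ID falls in a gap between reported IDs, i.e. one
 * that has disappeared from the controller.
 */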
static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns, *next;
	unsigned i;

	lockdep_assert_held(&ctrl->namespaces_mutex);

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nn)
			nvme_ns_remove(ns);
	}
}
void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	mutex_lock(&ctrl->namespaces_mutex);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
 done:
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
	mutex_unlock(&ctrl->namespaces_mutex);
}
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}
static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	device_remove_file(ctrl->device, &dev_attr_reset_controller);
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);

	ctrl->ops->free_ctrl(ctrl);
}
void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				dev, "nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	dev_set_drvdata(ctrl->device, ctrl);

	ret = device_create_file(ctrl->device, &dev_attr_reset_controller);
	if (ret)
		goto out_put_device;

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
 out_put_device:
	put_device(ctrl->device);
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 out_release_instance:
	nvme_release_instance(ctrl);
 out:
	return ret;
}
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
int __init nvme_core_init(void)
{
	int result;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_major = result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto unregister_blkdev;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
	return result;
}
void nvme_core_exit(void)
{
	unregister_blkdev(nvme_major, "nvme");
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}