/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static int nvme_major;
module_param(nvme_major, int, 0);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

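/*
 * Namespace reference counting: each nvme_ns holds a reference on its
 * parent controller, which nvme_free_ns() above drops once the last
 * namespace reference goes away.
 */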
static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

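/*
 * Translate a block-layer discard into a DSM (Dataset Management)
 * deallocate command.  A single nvme_dsm_range descriptor is attached to
 * the request as its payload; the controller reads the range list from
 * that buffer rather than from the command itself.
 */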
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req->cmd_flags & REQ_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req->cmd_flags & REQ_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

*dev
, struct nvme_id_ctrl
**id
)
386 struct nvme_command c
= { };
389 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
390 c
.identify
.opcode
= nvme_admin_identify
;
391 c
.identify
.cns
= cpu_to_le32(1);
393 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
397 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
398 sizeof(struct nvme_id_ctrl
));
404 static int nvme_identify_ns_list(struct nvme_ctrl
*dev
, unsigned nsid
, __le32
*ns_list
)
406 struct nvme_command c
= { };
408 c
.identify
.opcode
= nvme_admin_identify
;
409 c
.identify
.cns
= cpu_to_le32(2);
410 c
.identify
.nsid
= cpu_to_le32(nsid
);
411 return nvme_submit_sync_cmd(dev
->admin_q
, &c
, ns_list
, 0x1000);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status)
		return status;

	nr_io_queues = min(result & 0xffff, result >> 16) + 1;
	*count = min(*count, nr_io_queues);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

)
791 case PR_WRITE_EXCLUSIVE
:
793 case PR_EXCLUSIVE_ACCESS
:
795 case PR_WRITE_EXCLUSIVE_REG_ONLY
:
797 case PR_EXCLUSIVE_ACCESS_REG_ONLY
:
799 case PR_WRITE_EXCLUSIVE_ALL_REGS
:
801 case PR_EXCLUSIVE_ACCESS_ALL_REGS
:
808 static int nvme_pr_command(struct block_device
*bdev
, u32 cdw10
,
809 u64 key
, u64 sa_key
, u8 op
)
811 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
812 struct nvme_command c
;
813 u8 data
[16] = { 0, };
815 put_unaligned_le64(key
, &data
[0]);
816 put_unaligned_le64(sa_key
, &data
[8]);
818 memset(&c
, 0, sizeof(c
));
819 c
.common
.opcode
= op
;
820 c
.common
.nsid
= cpu_to_le32(ns
->ns_id
);
821 c
.common
.cdw10
[0] = cpu_to_le32(cdw10
);
823 return nvme_submit_sync_cmd(ns
->queue
, &c
, data
, 16);
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk = nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		ctrl->max_hw_sectors = UINT_MAX;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);

	kfree(id);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_attrs_are_visible,
};

#define nvme_show_str_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	NULL
};

static struct attribute_group nvme_dev_attrs_group = {
	.attrs = nvme_dev_attrs,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	lockdep_assert_held(&ctrl->namespaces_mutex);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid)
			return ns;
		if (ns->ns_id > nsid)
			break;
	}
	return NULL;
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	lockdep_assert_held(&ctrl->namespaces_mutex);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = ctrl->device;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	list_add_tail(&ns->list, &ctrl->namespaces);
	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	add_disk(ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}
	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);
	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto out;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_ns(ctrl, prev);
				if (ns)
					nvme_ns_remove(ns);
			}
		}
		nn -= j;
	}
 out:
	kfree(ns_list);
	return ret;
}

static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns, *next;
	unsigned i;

	lockdep_assert_held(&ctrl->namespaces_mutex);

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nn)
			nvme_ns_remove(ns);
	}
}

void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	mutex_lock(&ctrl->namespaces_mutex);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
 done:
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}
EXPORT_SYMBOL_GPL(nvme_scan_namespaces);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
 out_release_instance:
	nvme_release_instance(ctrl);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!kref_get_unless_zero(&ns->kref))
			continue;

		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);

		nvme_put_ns(ns);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
	int result;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_major = result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto unregister_blkdev;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
	return result;
}

void nvme_core_exit(void)
{
	unregister_blkdev(nvme_major, "nvme");
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);