/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)
unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);
unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);
unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
unsigned int nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, uint, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
EXPORT_SYMBOL_GPL(nvme_max_retries);
static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}
void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
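/*
 * Illustrative sketch (not part of the driver): how a 4 KiB write at a
 * 1 MiB byte offset maps onto the 0's-based rw fields filled in above,
 * assuming a 512-byte formatted namespace (ns->lba_shift == 9):
 *
 *	blk_rq_pos(req)   = 2048		(512-byte sectors)
 *	nvme_block_nr()   = 2048 >> (9 - 9) = 2048	-> rw.slba
 *	blk_rq_bytes(req) = 4096
 *	(4096 >> 9) - 1   = 7  (8 blocks, 0's based)	-> rw.length
 *
 * The numbers are only a worked example under those assumptions; actual
 * values depend on the request and the namespace format.
 */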
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req_op(req) == REQ_OP_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req_op(req) == REQ_OP_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
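/*
 * Illustrative sketch (not part of the driver): issuing a synchronous admin
 * command through the helper above.  The surrounding caller and its error
 * handling are hypothetical; only nvme_submit_sync_cmd() and struct
 * nvme_command come from this file and the NVMe headers.
 *
 *	struct nvme_command c = { };
 *	int ret;
 *
 *	c.features.opcode = nvme_admin_get_features;
 *	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
 *
 *	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
 *	if (ret)
 *		dev_warn(ctrl->device, "get-features failed: %d\n", ret);
 *
 * A caller that needs the completion dword (cqe.result) would use
 * __nvme_submit_sync_cmd() and pass a struct nvme_completion instead.
 */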
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (disk && bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(cqe.result);
	return ret;
}
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
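/*
 * Illustrative note (not part of the driver): cdw10 of Get Log Page packs
 * the Number of Dwords (NUMD, 0's based) in bits 27:16 and the Log Page
 * Identifier in bits 7:0.  For the SMART / Health log read above, assuming
 * sizeof(struct nvme_smart_log) == 512:
 *
 *	NUMD  = 512 / 4 - 1 = 127 (0x7f)
 *	cdw10 = (0x7f << 16) | NVME_LOG_SMART = 0x007f0002
 *
 * The 0x007f0002 value is only the worked-out constant for that assumed
 * structure size.
 */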
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
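/*
 * Illustrative sketch (not part of the driver): how a transport driver
 * might size its I/O queues with the helper above.  The nr_io_queues
 * variable and the surrounding function are hypothetical.
 *
 *	int nr_io_queues = num_online_cpus();
 *	int ret;
 *
 *	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
 *	if (ret < 0)
 *		return ret;
 *	if (nr_io_queues == 0)
 *		dev_warn(ctrl->device, "no usable I/O queues\n");
 *
 * After the call, nr_io_queues holds the smaller of the requested count and
 * the number of submission/completion queues the controller granted.
 */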
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
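/*
 * Illustrative note (not part of the driver): cdw10 layout used by the
 * pr_ops handlers above.  For Reservation Acquire, bits 2:0 are the action
 * (0 = acquire, 1 = preempt, 2 = preempt and abort), bit 3 is "ignore
 * existing key", and bits 15:8 carry the reservation type from
 * nvme_pr_type().  A worked example for preempt-and-abort of a
 * write-exclusive reservation:
 *
 *	cdw10 = (nvme_pr_type(PR_WRITE_EXCLUSIVE) << 8) | 2
 *	      = (1 << 8) | 2 = 0x102
 *
 * The bit positions are taken from the NVMe specification; double-check
 * them against the spec revision your device implements.
 */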
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Checking for ctrl->tagset is a trick to avoid sleeping on module
	 * load, since we only need the quirk on reset_controller. Notice
	 * that the HGST device needs this delay only in firmware activation
	 * procedure; unfortunately we have no (easy) way to verify this.
	 */
	if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
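/*
 * Illustrative note (not part of the driver): the ready and shutdown waits
 * above poll CSTS until the controller reports the expected state.  The
 * enable/disable timeout is derived from CAP.TO, which counts in 500 ms
 * units, so for a hypothetical controller reporting CAP.TO = 30:
 *
 *	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies
 *	        = (31 * HZ / 2) + jiffies	(roughly 15.5 seconds)
 *
 * SHUTDOWN_TIMEOUT, by contrast, is driven by the shutdown_timeout module
 * parameter defined near the top of this file.
 */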
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}
/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	if (ctrl->ops->is_fabrics) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
	}

	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}
static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
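/*
 * Illustrative note (not part of the driver): the strings wwid_show()
 * produces.  A namespace with a non-zero NGUID or EUI-64 yields an
 * "eui.<identifier bytes in hex>" form; otherwise the fallback combines
 * the PCI vendor ID, the serial and model bytes printed as hex, and the
 * namespace ID:
 *
 *	nvme.<vid>-<serial bytes in hex>-<model bytes in hex>-<nsid>
 *
 * The placeholders are descriptive only; the actual values come from the
 * controller's Identify data.
 */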
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
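/*
 * Illustrative note (not part of the driver): nvme_show_int_function(cntlid)
 * above expands to roughly the following, which is why a "cntlid" sysfs
 * attribute appears in nvme_dev_attrs[] further down:
 *
 *	static ssize_t cntlid_show(struct device *dev,
 *				   struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 *		return sprintf(buf, "%d\n", ctrl->cntlid);
 *	}
 *	static DEVICE_ATTR(cntlid, S_IRUGO, cntlid_show, NULL);
 */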
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	NULL
};
#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\
		return 0

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)
			return 0;
	}

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

	return a->mode;
}
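/*
 * Illustrative note (not part of the driver): CHECK_ATTR(ctrl, a, address)
 * above expands to
 *
 *	if ((a) == &dev_attr_address.attr &&
 *	    !(ctrl)->ops->get_address)
 *		return 0;
 *
 * i.e. the "address" attribute is hidden for transports whose nvme_ctrl_ops
 * do not provide a get_address() callback, and likewise for "subsysnqn".
 */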
static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}
*ctrl
, unsigned nn
)
1758 unsigned i
, j
, nsid
, prev
= 0, num_lists
= DIV_ROUND_UP(nn
, 1024);
1761 ns_list
= kzalloc(0x1000, GFP_KERNEL
);
1765 for (i
= 0; i
< num_lists
; i
++) {
1766 ret
= nvme_identify_ns_list(ctrl
, prev
, ns_list
);
1770 for (j
= 0; j
< min(nn
, 1024U); j
++) {
1771 nsid
= le32_to_cpu(ns_list
[j
]);
1775 nvme_validate_ns(ctrl
, nsid
);
1777 while (++prev
< nsid
) {
1778 ns
= nvme_find_get_ns(ctrl
, prev
);
1788 nvme_remove_invalid_namespaces(ctrl
, prev
);
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);

	if (ctrl->ops->post_scan)
		ctrl->ops->post_scan(ctrl);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead states indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++ctrl->event_limit;
		schedule_work(&ctrl->async_event_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
 * Initialize a NVMe controller structures.  This needs to be called during
 * earliest initialization so that we have the initialized structured around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
int __init nvme_core_init(void)
{
	int result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);