/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define NVME_MINORS		(1U << MINORBITS)
unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;
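/*
 * Usage example (an assumption about typical deployment, not taken from this
 * file): the parameters above can be set at module load time, e.g.
 * "modprobe nvme_core io_timeout=60 max_retries=3" or
 * "nvme_core.default_ps_max_latency_us=0" on the kernel command line, and the
 * writable ones can be changed later under /sys/module/nvme_core/parameters/.
 */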
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
	if (!queue_work(nvme_wq, &ctrl->reset_work))
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
	ret = nvme_reset_ctrl(ctrl);
	flush_work(&ctrl->reset_work);
static blk_status_t nvme_error_status(struct request *req)
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return BLK_STS_MEDIUM;
		return BLK_STS_IOERR;
static inline bool nvme_req_needs_retry(struct request *req)
	if (blk_noretry_request(req))
	if (nvme_req(req)->status & NVME_SC_DNR)
	if (jiffies - req->start_time >= req->timeout)
	if (nvme_req(req)->retries >= nvme_max_retries)

void nvme_complete_rq(struct request *req)
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));

	blk_mq_end_request(req, nvme_error_status(req));
EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_cancel_request(struct request *req, void *data, bool reserved)
	if (!blk_mq_request_started(req))

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);
EXPORT_SYMBOL_GPL(nvme_cancel_request);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_RECONNECTING:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_RECONNECTING:
	case NVME_CTRL_DELETING:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_RECONNECTING:
	case NVME_CTRL_DELETING:
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
static void nvme_free_ns(struct kref *kref)
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	nvme_nvm_unregister(ns);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);

static void nvme_put_ns(struct nvme_ns *ns)
	kref_put(&ns->kref, nvme_free_ns);
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (!kref_get_unless_zero(&ns->kref))
	if (!try_module_get(ns->ctrl->ops->module))
	spin_unlock(&dev_list_lock);

	kref_put(&ns->kref, nvme_free_ns);
	spin_unlock(&dev_list_lock);
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
		req = blk_mq_alloc_request_hctx(q, op, flags,

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);

	if (WARN_ON_ONCE(n != segments)) {
		return BLK_STS_IOERR;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
	/*
	 * If formatted with metadata, require the block layer provide a buffer
	 * unless this namespace is formatted such that the metadata can be
	 * stripped/generated by the controller with PRACT=1.
	 */
	    (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
		return BLK_STS_NOTSUPP;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		control |= NVME_RW_PRINFO_PRCHK_GUARD;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		control |= NVME_RW_PRINFO_PRCHK_GUARD |
			NVME_RW_PRINFO_PRCHK_REF;
		cmnd->rw.reftag = cpu_to_le32(
				nvme_block_nr(ns, blk_rq_pos(req)));

	if (!blk_integrity_rq(req))
		control |= NVME_RW_PRINFO_PRACT;

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
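/*
 * Worked example (illustrative numbers only): a 4KiB write to a namespace
 * formatted with 512-byte LBAs (lba_shift = 9) has blk_rq_bytes() == 4096, so
 * cmnd->rw.length = (4096 >> 9) - 1 = 7, i.e. eight logical blocks expressed
 * zero-based as the command set requires.
 */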
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;

	switch (req_op(req)) {
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		nvme_setup_flush(ns, cmd);
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
		ret = nvme_setup_discard(ns, req, cmd);
		ret = nvme_setup_rw(ns, req, cmd);
		return BLK_STS_IOERR;

	cmd->common.command_id = req->tag;
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
	req = nvme_alloc_request(q, cmd, flags, qid);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);

	blk_execute_rq(req->q, NULL, req, at_head);
	*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = nvme_req(req)->status;
	blk_mq_free_request(req);
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct bio *bio = NULL;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
			bio->bi_bdev = bdget_disk(disk, 0);

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
				if (copy_from_user(meta, meta_buffer,

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = nvme_req(req)->status;
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))

	if (disk && bio->bi_bdev)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

static void nvme_keep_alive_work(struct work_struct *work)
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
	if (unlikely(ctrl->kato == 0))

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
	if (unlikely(ctrl->kato == 0))

	cancel_delayed_work_sync(&ctrl->ka_work);
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
	struct nvme_command c = { };

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);

	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ns->eui, data + pos + sizeof(*cur), len);
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ns->nguid, data + pos + sizeof(*cur), len);
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
			/* Skip unknown types */
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
	struct nvme_command c = { };

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result)
	struct nvme_command c;
	union nvme_result res;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
	struct nvme_command c;
	union nvme_result res;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
	struct nvme_command c = { };

	c.common.opcode = nvme_admin_get_log_page,
	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix them up.
	 */
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);

		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
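/*
 * Worked example (illustrative numbers only): requesting *count = 8 encodes
 * q_count = 7 | (7 << 16) = 0x00070007, since the Number of Queues feature
 * carries zero-based submission and completion queue counts in the low and
 * high 16 bits.  If the controller answers with 0x0003000f, the driver keeps
 * min(0xf, 0x3) + 1 = 4 usable I/O queues.
 */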
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))

	case nvme_cmd_compare:

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;

	if (!capable(CAP_SYS_ADMIN))
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);

	if (put_user(cmd.result, &ucmd->result))
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
	struct nvme_ns *ns = bdev->bd_disk->private_data;

		force_successful_syscall_return();
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
		return nvme_sg_io(ns, (void __user *)arg);
		return nvme_nvm_ioctl(ns, cmd, arg);
	if (is_sed_ioctl(cmd))
		return sed_ioctl(ns->ctrl->opal_dev, cmd,
				 (void __user *) arg);

static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
	return nvme_ioctl(bdev, mode, cmd, arg);
#define nvme_compat_ioctl	NULL
static int nvme_open(struct block_device *bdev, fmode_t mode)
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;

static void nvme_release(struct gendisk *disk, fmode_t mode)
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
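/*
 * Worked example (illustrative capacity only): with the fake geometry of 64
 * heads and 32 sectors per track above, a namespace of 2^31 512-byte sectors
 * (1TiB) reports get_capacity() >> 11 = 2^20 = 1048576 cylinders.
 */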
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
	struct nvme_ns *ns = disk->private_data;
	u16 old_ms = ns->ms;

	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/* PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		pi_type = id->dps & NVME_NS_DPS_PI_MASK;

	if (blk_get_integrity(disk) &&
	    (ns->pi_type != pi_type || ns->ms != old_ms ||
	     bs != queue_logical_block_size(disk->queue) ||
	     (ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;

static void nvme_init_integrity(struct nvme_ns *ns)
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		integrity.profile = NULL;

	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);

static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
static void nvme_init_integrity(struct nvme_ns *ns)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_set_chunk_size(struct nvme_ns *ns)
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));

static void nvme_config_discard(struct nvme_ns *ns)
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
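/*
 * Worked example (illustrative values only): a namespace reporting noiob = 256
 * with 4KiB LBAs (lba_shift = 12) gives chunk_size = 256 << 3 = 2048 sectors,
 * so the block layer is asked to split I/O on 1MiB optimal-boundary multiples.
 */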
static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);

	if ((*id)->ncap == 0) {

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
	if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat an error as fatal; we may already have an NGUID or EUI-64. */
		if (nvme_identify_ns_descs(ns, ns->ns_id))
			dev_warn(ns->ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
	bs = 1 << ns->lba_shift;
	ns->noiob = le16_to_cpu(id->noiob);

	blk_mq_freeze_queue(disk->queue);

	if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
		nvme_prep_integrity(disk, id, bs);
	blk_queue_logical_block_size(ns->queue, bs);
		nvme_set_chunk_size(ns);
	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
static int nvme_revalidate_disk(struct gendisk *disk)
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);

	ret = nvme_revalidate_ns(ns, &id);

	__nvme_revalidate_disk(disk, id);
static char nvme_pr_type(enum pr_type type)
	case PR_WRITE_EXCLUSIVE:
	case PR_EXCLUSIVE_ACCESS:
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
	if (flags & ~PR_FL_IGNORE_KEY)

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
	if (flags & ~PR_FL_IGNORE_KEY)

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);

static int nvme_pr_clear(struct block_device *bdev, u64 key)
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
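/*
 * Worked example (an assumption based on the reservation command layout, not
 * stated in this file): registering a brand-new key (old == 0, no
 * PR_FL_IGNORE_KEY) in nvme_pr_register() yields
 * cdw10 = (1 << 30) | (1 << 31) = 0xC0000000, i.e. action "register", IEKEY
 * clear, and persist-through-power-loss enabled as the PTPL=1 comment says.
 */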
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
		cmd.common.opcode = nvme_admin_security_send;
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)

		if (fatal_signal_pending(current))
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);

	return nvme_wait_ready(ctrl, cap, true);
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
	unsigned long timeout = jiffies + (shutdown_timeout * HZ);

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)

		if (fatal_signal_pending(current))
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
	if (ctrl->max_hw_sectors) {
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));

	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
	blk_queue_write_cache(q, vwc, vwc);
static void nvme_configure_apst(struct nvme_ctrl *ctrl)
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */
	struct nvme_feat_auto_pst *table;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");

	table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		dev_dbg(ctrl->device, "APST disabled\n");
		__le64 target = cpu_to_le64(0);

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
					NVME_PS_FLAGS_NON_OP_STATE))

				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)

				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;

		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
			max_ps, max_lat_us, (int)sizeof(*table), table);

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
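/*
 * Worked example (illustrative latencies only): a non-operational state with
 * entry and exit latencies of 5000us each gives total_latency_us = 10000, so
 * transition_ms = (10000 + 19) / 20 = 500ms.  That matches the 2% heuristic
 * above: 50 * 10000us = 500ms spent idle before transitioning.
 */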
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
{
1594 * NVMe model and firmware strings are padded with spaces. For
1595 * simplicity, strings in the quirk table are padded with NULLs
1601 unsigned long quirks
;
1604 static const struct nvme_core_quirk_entry core_quirks
[] = {
1607 * This Toshiba device seems to die using any APST states. See:
1608 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
1611 .mn
= "THNSF5256GPUK TOSHIBA",
1612 .quirks
= NVME_QUIRK_NO_APST
,
1616 /* match is null-terminated but idstr is space-padded. */
1617 static bool string_matches(const char *idstr
, const char *match
, size_t len
)
1624 matchlen
= strlen(match
);
1625 WARN_ON_ONCE(matchlen
> len
);
1627 if (memcmp(idstr
, match
, matchlen
))
1630 for (; matchlen
< len
; matchlen
++)
1631 if (idstr
[matchlen
] != ' ')
1637 static bool quirk_matches(const struct nvme_id_ctrl
*id
,
1638 const struct nvme_core_quirk_entry
*q
)
1640 return q
->vid
== le16_to_cpu(id
->vid
) &&
1641 string_matches(id
->mn
, q
->mn
, sizeof(id
->mn
)) &&
1642 string_matches(id
->fr
, q
->fr
, sizeof(id
->fr
));
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
	struct nvme_id_ctrl *id;
	int ret, page_shift;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);

	if (!ctrl->identified) {
		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	ctrl->npss = id->npss;
	prev_apsta = ctrl->apsta;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->apsta = id->apsta;
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/* In fabrics we need to verify the cntlid matches the admin connect */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);

	if (ctrl->apsta && !prev_apsta)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apsta && prev_apsta)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	nvme_configure_apst(ctrl);

	ctrl->identified = true;
EXPORT_SYMBOL_GPL(nvme_init_identify);
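/*
 * Worked example (illustrative MDTS value): with page_shift = 12 and
 * id->mdts = 5, max_hw_sectors = 1 << (5 + 12 - 9) = 256 sectors, i.e. the
 * 128KiB (2^5 * 4KiB) maximum transfer size the controller advertised.
 */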
static int nvme_dev_open(struct inode *inode, struct file *file)
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)

		if (!ctrl->admin_q) {
		if (!kref_get_unless_zero(&ctrl->kref))
		file->private_data = ctrl;

	spin_unlock(&dev_list_lock);

static int nvme_dev_release(struct inode *inode, struct file *file)
	nvme_put_ctrl(file->private_data);
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);

	mutex_unlock(&ctrl->namespaces_mutex);
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	ret = nvme_reset_ctrl_sync(ctrl);
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
		return sprintf(buf, "eui.%16phN\n", ns->nguid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
	while (ctrl->model[model_len - 1] == ' ')

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%pU\n", ns->nguid);
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ns->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ns->nguid);
	return sprintf(buf, "%pU\n", &ns->uuid);
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%8phd\n", ns->eui);
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	return sprintf(buf, "%d\n", ns->ns_id);
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_nsid.attr,

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ns->uuid) ||
		    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_RECONNECTING]= "reconnecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,

#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
		if (ns->ns_id > nsid)
	mutex_unlock(&ctrl->namespaces_mutex);
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;

	kref_init(&ns->kref);
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_ns(ns, &id))
		goto out_free_queue;

	if (nvme_nvm_ns_supported(ns, id) &&
				nvme_nvm_register(ns, disk_name, node)) {
		dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);

	disk = alloc_disk_node(0, node);

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);

	__nvme_revalidate_disk(disk, id);

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);

	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
static void nvme_ns_remove(struct nvme_ns *ns)
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	ns = nvme_find_get_ns(ctrl, nsid);
		if (ns->disk && revalidate_disk(ns->disk))
		nvme_alloc_ns(ctrl, nsid);

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);

	ns_list = kzalloc(0x1000, GFP_KERNEL);

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);

	nvme_remove_invalid_namespaces(ctrl, prev);

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
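/*
 * Worked example (illustrative namespace count): a controller reporting
 * nn = 3000 namespaces needs num_lists = DIV_ROUND_UP(3000, 1024) = 3
 * Identify active-namespace-list round trips, each returning up to 1024 NSIDs.
 */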
static void nvme_scan_work(struct work_struct *work)
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;

	if (ctrl->state != NVME_CTRL_LIVE)

	if (nvme_identify_ctrl(ctrl, &id))

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
	nvme_scan_ns_sequential(ctrl, nn);

	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
	/* Do not queue new scan work when a controller is reset during removal. */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
EXPORT_SYMBOL_GPL(nvme_queue_scan);
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * A dead state indicates that the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
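/*
 * Async Event Request (AER) handling. The work item below keeps up to
 * ctrl->event_limit AER commands outstanding on the admin queue; completions
 * come back through nvme_complete_async_event(), which rearms an AER and
 * triggers a namespace rescan on "namespace changed" notices. Transports
 * typically arm this machinery by calling nvme_queue_async_events() once the
 * controller is live.
 */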
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	bool done = true;

	switch (le16_to_cpu(status) >> 1) {
	case NVME_SC_SUCCESS:
		done = false;
		/*FALLTHRU*/
	case NVME_SC_ABORT_REQ:
		++ctrl->event_limit;
		queue_work(nvme_wq, &ctrl->async_event_work);
		break;
	default:
		break;
	}

	if (done)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);
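/*
 * Controller instance numbers (the N in "nvmeN") come from a global IDA;
 * allocation and release are serialized by dev_list_lock.
 */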
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}
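/*
 * nvme_uninit_ctrl() undoes nvme_init_ctrl(): flush the async-event and scan
 * work, remove all namespaces, destroy the character device node and drop the
 * controller from the global list.
 */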
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
 * Initialize an NVMe controller's structures. This needs to be called during
 * the earliest initialization so that we have the initialized structures
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	/*
	 * Initialize latency tolerance controls. The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);

	/* Forcibly start all queues to avoid having stuck requests */
	blk_mq_start_hw_queues(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			continue;
		revalidate_disk(ns->disk);
		blk_set_queue_dying(ns->queue);

		/*
		 * Forcibly start all queues to avoid having stuck requests.
		 * Note that we must ensure the queues are not stopped
		 * when the final removal happens.
		 */
		blk_mq_start_hw_queues(ns->queue);

		/* draining requests in requeue list */
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
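/*
 * Queue freeze/quiesce helpers used by the transports around controller
 * resets. An illustrative ordering, loosely modelled on a PCIe-style reset
 * path (exact usage is up to each transport):
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT);
 *	nvme_stop_queues(ctrl);
 *	... reset the controller ...
 *	nvme_start_queues(ctrl);
 *	nvme_wait_freeze(ctrl);
 *	nvme_unfreeze(ctrl);
 */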
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		blk_mq_unquiesce_queue(ns->queue);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
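/*
 * Module init/exit: set up the shared "nvme-wq" workqueue, the character
 * device region used for per-controller nodes, and the "nvme" device class,
 * and tear them down again in reverse order on exit.
 */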
int __init nvme_core_init(void)
{
	int result;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		return -ENOMEM;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto destroy_wq;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_wq:
	destroy_workqueue(nvme_wq);
	return result;
}
void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);