/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
#define NVME_MINORS		(1U << MINORBITS)
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
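
/*
 * Build Command Dword 10 for a Get Log Page command: the low byte carries the
 * log identifier, and bits 31:16 carry the number of dwords to transfer,
 * encoded zero-based as (size / 4) - 1.
 */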
static __le32 nvme_get_log_dw10(u8 lid, size_t size)
{
	return cpu_to_le32((((size / 4) - 1) << 16) | lid);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->reset_work);
	return ret;
}
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
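
/*
 * nvme_delete_ctrl() above only queues the deletion; the _sync variant below
 * also waits for the queued work to complete before returning.
 */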
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until the work is flushed since ->delete_ctrl
	 * can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	ret = nvme_delete_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}
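
/*
 * A failed command is only requeued when nothing rules a retry out: the
 * request must not be marked no-retry, the status must not carry the DNR
 * (do not retry) bit, the per-command retry budget (nvme_max_retries) must
 * not be exhausted, and the queue must still be alive.
 */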
static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	if (blk_queue_dying(req->q))
		return false;
	return true;
}
void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, true);
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	nvme_req(req)->status = NVME_SC_ABORT_REQ;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
219 bool nvme_change_ctrl_state(struct nvme_ctrl
*ctrl
,
220 enum nvme_ctrl_state new_state
)
222 enum nvme_ctrl_state old_state
;
224 bool changed
= false;
226 spin_lock_irqsave(&ctrl
->lock
, flags
);
228 old_state
= ctrl
->state
;
233 case NVME_CTRL_RESETTING
:
234 case NVME_CTRL_RECONNECTING
:
241 case NVME_CTRL_RESETTING
:
251 case NVME_CTRL_RECONNECTING
:
254 case NVME_CTRL_RESETTING
:
261 case NVME_CTRL_DELETING
:
264 case NVME_CTRL_RESETTING
:
265 case NVME_CTRL_RECONNECTING
:
274 case NVME_CTRL_DELETING
:
286 ctrl
->state
= new_state
;
288 spin_unlock_irqrestore(&ctrl
->lock
, flags
);
292 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state
);
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}
static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
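
/*
 * Allocate a driver-private (passthrough) request.  NVME_QID_ANY lets the
 * block layer pick any hardware context, otherwise the request is pinned to
 * the given queue id.  REQ_FAILFAST_DRIVER keeps the block layer from
 * retrying these commands on its own.
 */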
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}
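
/*
 * Enable the Streams directive when the controller advertises it, then cap
 * the number of streams actually used to what fits in the block layer's
 * write hints (BLK_MAX_WRITE_HINTS - 1).
 */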
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;

	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}
441 static blk_status_t
nvme_setup_discard(struct nvme_ns
*ns
, struct request
*req
,
442 struct nvme_command
*cmnd
)
444 unsigned short segments
= blk_rq_nr_discard_segments(req
), n
= 0;
445 struct nvme_dsm_range
*range
;
448 range
= kmalloc_array(segments
, sizeof(*range
), GFP_ATOMIC
);
450 return BLK_STS_RESOURCE
;
452 __rq_for_each_bio(bio
, req
) {
453 u64 slba
= nvme_block_nr(ns
, bio
->bi_iter
.bi_sector
);
454 u32 nlb
= bio
->bi_iter
.bi_size
>> ns
->lba_shift
;
456 range
[n
].cattr
= cpu_to_le32(0);
457 range
[n
].nlb
= cpu_to_le32(nlb
);
458 range
[n
].slba
= cpu_to_le64(slba
);
462 if (WARN_ON_ONCE(n
!= segments
)) {
464 return BLK_STS_IOERR
;
467 memset(cmnd
, 0, sizeof(*cmnd
));
468 cmnd
->dsm
.opcode
= nvme_cmd_dsm
;
469 cmnd
->dsm
.nsid
= cpu_to_le32(ns
->ns_id
);
470 cmnd
->dsm
.nr
= cpu_to_le32(segments
- 1);
471 cmnd
->dsm
.attributes
= cpu_to_le32(NVME_DSMGMT_AD
);
473 req
->special_vec
.bv_page
= virt_to_page(range
);
474 req
->special_vec
.bv_offset
= offset_in_page(range
);
475 req
->special_vec
.bv_len
= sizeof(*range
) * segments
;
476 req
->rq_flags
|= RQF_SPECIAL_PAYLOAD
;
481 static inline blk_status_t
nvme_setup_rw(struct nvme_ns
*ns
,
482 struct request
*req
, struct nvme_command
*cmnd
)
484 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
488 if (req
->cmd_flags
& REQ_FUA
)
489 control
|= NVME_RW_FUA
;
490 if (req
->cmd_flags
& (REQ_FAILFAST_DEV
| REQ_RAHEAD
))
491 control
|= NVME_RW_LR
;
493 if (req
->cmd_flags
& REQ_RAHEAD
)
494 dsmgmt
|= NVME_RW_DSM_FREQ_PREFETCH
;
496 memset(cmnd
, 0, sizeof(*cmnd
));
497 cmnd
->rw
.opcode
= (rq_data_dir(req
) ? nvme_cmd_write
: nvme_cmd_read
);
498 cmnd
->rw
.nsid
= cpu_to_le32(ns
->ns_id
);
499 cmnd
->rw
.slba
= cpu_to_le64(nvme_block_nr(ns
, blk_rq_pos(req
)));
500 cmnd
->rw
.length
= cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
502 if (req_op(req
) == REQ_OP_WRITE
&& ctrl
->nr_streams
)
503 nvme_assign_write_stream(ctrl
, req
, &control
, &dsmgmt
);
	/*
	 * If formatted with metadata, the block layer always provides a
	 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
	 * we enable the PRACT bit for protection information or set the
	 * namespace capacity to zero to prevent any I/O.
	 */
512 if (!blk_integrity_rq(req
)) {
513 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns
)))
514 return BLK_STS_NOTSUPP
;
515 control
|= NVME_RW_PRINFO_PRACT
;
518 switch (ns
->pi_type
) {
519 case NVME_NS_DPS_PI_TYPE3
:
520 control
|= NVME_RW_PRINFO_PRCHK_GUARD
;
522 case NVME_NS_DPS_PI_TYPE1
:
523 case NVME_NS_DPS_PI_TYPE2
:
524 control
|= NVME_RW_PRINFO_PRCHK_GUARD
|
525 NVME_RW_PRINFO_PRCHK_REF
;
526 cmnd
->rw
.reftag
= cpu_to_le32(
527 nvme_block_nr(ns
, blk_rq_pos(req
)));
532 cmnd
->rw
.control
= cpu_to_le16(control
);
533 cmnd
->rw
.dsmgmt
= cpu_to_le32(dsmgmt
);
537 blk_status_t
nvme_setup_cmd(struct nvme_ns
*ns
, struct request
*req
,
538 struct nvme_command
*cmd
)
540 blk_status_t ret
= BLK_STS_OK
;
542 if (!(req
->rq_flags
& RQF_DONTPREP
)) {
543 nvme_req(req
)->retries
= 0;
544 nvme_req(req
)->flags
= 0;
545 req
->rq_flags
|= RQF_DONTPREP
;
548 switch (req_op(req
)) {
551 memcpy(cmd
, nvme_req(req
)->cmd
, sizeof(*cmd
));
554 nvme_setup_flush(ns
, cmd
);
556 case REQ_OP_WRITE_ZEROES
:
557 /* currently only aliased to deallocate for a few ctrls: */
559 ret
= nvme_setup_discard(ns
, req
, cmd
);
563 ret
= nvme_setup_rw(ns
, req
, cmd
);
567 return BLK_STS_IOERR
;
570 cmd
->common
.command_id
= req
->tag
;
573 EXPORT_SYMBOL_GPL(nvme_setup_cmd
);
576 * Returns 0 on success. If the result is negative, it's a Linux error code;
577 * if the result is positive, it's an NVM Express status code
579 int __nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
580 union nvme_result
*result
, void *buffer
, unsigned bufflen
,
581 unsigned timeout
, int qid
, int at_head
,
582 blk_mq_req_flags_t flags
)
587 req
= nvme_alloc_request(q
, cmd
, flags
, qid
);
591 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
593 if (buffer
&& bufflen
) {
594 ret
= blk_rq_map_kern(q
, req
, buffer
, bufflen
, GFP_KERNEL
);
599 blk_execute_rq(req
->q
, NULL
, req
, at_head
);
601 *result
= nvme_req(req
)->result
;
602 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
605 ret
= nvme_req(req
)->status
;
607 blk_mq_free_request(req
);
610 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd
);
612 int nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
613 void *buffer
, unsigned bufflen
)
615 return __nvme_submit_sync_cmd(q
, cmd
, NULL
, buffer
, bufflen
, 0,
618 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd
);
620 static void *nvme_add_user_metadata(struct bio
*bio
, void __user
*ubuf
,
621 unsigned len
, u32 seed
, bool write
)
623 struct bio_integrity_payload
*bip
;
627 buf
= kmalloc(len
, GFP_KERNEL
);
632 if (write
&& copy_from_user(buf
, ubuf
, len
))
635 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, 1);
641 bip
->bip_iter
.bi_size
= len
;
642 bip
->bip_iter
.bi_sector
= seed
;
643 ret
= bio_integrity_add_page(bio
, virt_to_page(buf
), len
,
644 offset_in_page(buf
));
654 static int nvme_submit_user_cmd(struct request_queue
*q
,
655 struct nvme_command
*cmd
, void __user
*ubuffer
,
656 unsigned bufflen
, void __user
*meta_buffer
, unsigned meta_len
,
657 u32 meta_seed
, u32
*result
, unsigned timeout
)
659 bool write
= nvme_is_write(cmd
);
660 struct nvme_ns
*ns
= q
->queuedata
;
661 struct gendisk
*disk
= ns
? ns
->disk
: NULL
;
663 struct bio
*bio
= NULL
;
667 req
= nvme_alloc_request(q
, cmd
, 0, NVME_QID_ANY
);
671 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
673 if (ubuffer
&& bufflen
) {
674 ret
= blk_rq_map_user(q
, req
, NULL
, ubuffer
, bufflen
,
680 if (disk
&& meta_buffer
&& meta_len
) {
681 meta
= nvme_add_user_metadata(bio
, meta_buffer
, meta_len
,
690 blk_execute_rq(req
->q
, disk
, req
, 0);
691 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
694 ret
= nvme_req(req
)->status
;
696 *result
= le32_to_cpu(nvme_req(req
)->result
.u32
);
697 if (meta
&& !ret
&& !write
) {
698 if (copy_to_user(meta_buffer
, meta
, meta_len
))
704 blk_rq_unmap_user(bio
);
706 blk_mq_free_request(req
);
710 static void nvme_keep_alive_end_io(struct request
*rq
, blk_status_t status
)
712 struct nvme_ctrl
*ctrl
= rq
->end_io_data
;
714 blk_mq_free_request(rq
);
717 dev_err(ctrl
->device
,
718 "failed nvme_keep_alive_end_io error=%d\n",
723 schedule_delayed_work(&ctrl
->ka_work
, ctrl
->kato
* HZ
);
726 static int nvme_keep_alive(struct nvme_ctrl
*ctrl
)
728 struct nvme_command c
;
731 memset(&c
, 0, sizeof(c
));
732 c
.common
.opcode
= nvme_admin_keep_alive
;
734 rq
= nvme_alloc_request(ctrl
->admin_q
, &c
, BLK_MQ_REQ_RESERVED
,
739 rq
->timeout
= ctrl
->kato
* HZ
;
740 rq
->end_io_data
= ctrl
;
742 blk_execute_rq_nowait(rq
->q
, NULL
, rq
, 0, nvme_keep_alive_end_io
);
747 static void nvme_keep_alive_work(struct work_struct
*work
)
749 struct nvme_ctrl
*ctrl
= container_of(to_delayed_work(work
),
750 struct nvme_ctrl
, ka_work
);
752 if (nvme_keep_alive(ctrl
)) {
753 /* allocation failure, reset the controller */
754 dev_err(ctrl
->device
, "keep-alive failed\n");
755 nvme_reset_ctrl(ctrl
);
760 void nvme_start_keep_alive(struct nvme_ctrl
*ctrl
)
762 if (unlikely(ctrl
->kato
== 0))
765 INIT_DELAYED_WORK(&ctrl
->ka_work
, nvme_keep_alive_work
);
766 schedule_delayed_work(&ctrl
->ka_work
, ctrl
->kato
* HZ
);
768 EXPORT_SYMBOL_GPL(nvme_start_keep_alive
);
770 void nvme_stop_keep_alive(struct nvme_ctrl
*ctrl
)
772 if (unlikely(ctrl
->kato
== 0))
775 cancel_delayed_work_sync(&ctrl
->ka_work
);
777 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive
);
779 static int nvme_identify_ctrl(struct nvme_ctrl
*dev
, struct nvme_id_ctrl
**id
)
781 struct nvme_command c
= { };
784 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
785 c
.identify
.opcode
= nvme_admin_identify
;
786 c
.identify
.cns
= NVME_ID_CNS_CTRL
;
788 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
792 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
793 sizeof(struct nvme_id_ctrl
));
799 static int nvme_identify_ns_descs(struct nvme_ctrl
*ctrl
, unsigned nsid
,
800 u8
*eui64
, u8
*nguid
, uuid_t
*uuid
)
802 struct nvme_command c
= { };
808 c
.identify
.opcode
= nvme_admin_identify
;
809 c
.identify
.nsid
= cpu_to_le32(nsid
);
810 c
.identify
.cns
= NVME_ID_CNS_NS_DESC_LIST
;
812 data
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
816 status
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, data
,
817 NVME_IDENTIFY_DATA_SIZE
);
821 for (pos
= 0; pos
< NVME_IDENTIFY_DATA_SIZE
; pos
+= len
) {
822 struct nvme_ns_id_desc
*cur
= data
+ pos
;
828 case NVME_NIDT_EUI64
:
829 if (cur
->nidl
!= NVME_NIDT_EUI64_LEN
) {
830 dev_warn(ctrl
->device
,
831 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
835 len
= NVME_NIDT_EUI64_LEN
;
836 memcpy(eui64
, data
+ pos
+ sizeof(*cur
), len
);
838 case NVME_NIDT_NGUID
:
839 if (cur
->nidl
!= NVME_NIDT_NGUID_LEN
) {
840 dev_warn(ctrl
->device
,
841 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
845 len
= NVME_NIDT_NGUID_LEN
;
846 memcpy(nguid
, data
+ pos
+ sizeof(*cur
), len
);
849 if (cur
->nidl
!= NVME_NIDT_UUID_LEN
) {
850 dev_warn(ctrl
->device
,
851 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
855 len
= NVME_NIDT_UUID_LEN
;
856 uuid_copy(uuid
, data
+ pos
+ sizeof(*cur
));
			/* Skip unknown types */
871 static int nvme_identify_ns_list(struct nvme_ctrl
*dev
, unsigned nsid
, __le32
*ns_list
)
873 struct nvme_command c
= { };
875 c
.identify
.opcode
= nvme_admin_identify
;
876 c
.identify
.cns
= NVME_ID_CNS_NS_ACTIVE_LIST
;
877 c
.identify
.nsid
= cpu_to_le32(nsid
);
878 return nvme_submit_sync_cmd(dev
->admin_q
, &c
, ns_list
, 0x1000);
881 static struct nvme_id_ns
*nvme_identify_ns(struct nvme_ctrl
*ctrl
,
884 struct nvme_id_ns
*id
;
885 struct nvme_command c
= { };
888 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
889 c
.identify
.opcode
= nvme_admin_identify
;
890 c
.identify
.nsid
= cpu_to_le32(nsid
);
891 c
.identify
.cns
= NVME_ID_CNS_NS
;
893 id
= kmalloc(sizeof(*id
), GFP_KERNEL
);
897 error
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, id
, sizeof(*id
));
899 dev_warn(ctrl
->device
, "Identify namespace failed\n");
907 static int nvme_set_features(struct nvme_ctrl
*dev
, unsigned fid
, unsigned dword11
,
908 void *buffer
, size_t buflen
, u32
*result
)
910 struct nvme_command c
;
911 union nvme_result res
;
914 memset(&c
, 0, sizeof(c
));
915 c
.features
.opcode
= nvme_admin_set_features
;
916 c
.features
.fid
= cpu_to_le32(fid
);
917 c
.features
.dword11
= cpu_to_le32(dword11
);
919 ret
= __nvme_submit_sync_cmd(dev
->admin_q
, &c
, &res
,
920 buffer
, buflen
, 0, NVME_QID_ANY
, 0, 0);
921 if (ret
>= 0 && result
)
922 *result
= le32_to_cpu(res
.u32
);
926 int nvme_set_queue_count(struct nvme_ctrl
*ctrl
, int *count
)
928 u32 q_count
= (*count
- 1) | ((*count
- 1) << 16);
930 int status
, nr_io_queues
;
932 status
= nvme_set_features(ctrl
, NVME_FEAT_NUM_QUEUES
, q_count
, NULL
, 0,
	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
943 dev_err(ctrl
->device
, "Could not set queue count (%d)\n", status
);
946 nr_io_queues
= min(result
& 0xffff, result
>> 16) + 1;
947 *count
= min(*count
, nr_io_queues
);
952 EXPORT_SYMBOL_GPL(nvme_set_queue_count
);
954 static int nvme_submit_io(struct nvme_ns
*ns
, struct nvme_user_io __user
*uio
)
956 struct nvme_user_io io
;
957 struct nvme_command c
;
958 unsigned length
, meta_len
;
959 void __user
*metadata
;
961 if (copy_from_user(&io
, uio
, sizeof(io
)))
969 case nvme_cmd_compare
:
975 length
= (io
.nblocks
+ 1) << ns
->lba_shift
;
976 meta_len
= (io
.nblocks
+ 1) * ns
->ms
;
977 metadata
= (void __user
*)(uintptr_t)io
.metadata
;
982 } else if (meta_len
) {
983 if ((io
.metadata
& 3) || !io
.metadata
)
987 memset(&c
, 0, sizeof(c
));
988 c
.rw
.opcode
= io
.opcode
;
989 c
.rw
.flags
= io
.flags
;
990 c
.rw
.nsid
= cpu_to_le32(ns
->ns_id
);
991 c
.rw
.slba
= cpu_to_le64(io
.slba
);
992 c
.rw
.length
= cpu_to_le16(io
.nblocks
);
993 c
.rw
.control
= cpu_to_le16(io
.control
);
994 c
.rw
.dsmgmt
= cpu_to_le32(io
.dsmgmt
);
995 c
.rw
.reftag
= cpu_to_le32(io
.reftag
);
996 c
.rw
.apptag
= cpu_to_le16(io
.apptag
);
997 c
.rw
.appmask
= cpu_to_le16(io
.appmask
);
999 return nvme_submit_user_cmd(ns
->queue
, &c
,
1000 (void __user
*)(uintptr_t)io
.addr
, length
,
1001 metadata
, meta_len
, io
.slba
, NULL
, 0);
1004 static u32
nvme_known_admin_effects(u8 opcode
)
1007 case nvme_admin_format_nvm
:
1008 return NVME_CMD_EFFECTS_CSUPP
| NVME_CMD_EFFECTS_LBCC
|
1009 NVME_CMD_EFFECTS_CSE_MASK
;
1010 case nvme_admin_sanitize_nvm
:
1011 return NVME_CMD_EFFECTS_CSE_MASK
;
1018 static u32
nvme_passthru_start(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1025 effects
= le32_to_cpu(ctrl
->effects
->iocs
[opcode
]);
1026 if (effects
& ~NVME_CMD_EFFECTS_CSUPP
)
1027 dev_warn(ctrl
->device
,
1028 "IO command:%02x has unhandled effects:%08x\n",
1034 effects
= le32_to_cpu(ctrl
->effects
->iocs
[opcode
]);
1036 effects
= nvme_known_admin_effects(opcode
);
	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
1042 if (effects
& (NVME_CMD_EFFECTS_LBCC
| NVME_CMD_EFFECTS_CSE_MASK
)) {
1043 nvme_start_freeze(ctrl
);
1044 nvme_wait_freeze(ctrl
);
1049 static void nvme_update_formats(struct nvme_ctrl
*ctrl
)
1053 mutex_lock(&ctrl
->namespaces_mutex
);
1054 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
1055 if (ns
->disk
&& nvme_revalidate_disk(ns
->disk
))
1058 mutex_unlock(&ctrl
->namespaces_mutex
);
1061 static void nvme_passthru_end(struct nvme_ctrl
*ctrl
, u32 effects
)
1064 * Revalidate LBA changes prior to unfreezing. This is necessary to
1065 * prevent memory corruption if a logical block size was changed by
1068 if (effects
& NVME_CMD_EFFECTS_LBCC
)
1069 nvme_update_formats(ctrl
);
1070 if (effects
& (NVME_CMD_EFFECTS_LBCC
| NVME_CMD_EFFECTS_CSE_MASK
))
1071 nvme_unfreeze(ctrl
);
1072 if (effects
& NVME_CMD_EFFECTS_CCC
)
1073 nvme_init_identify(ctrl
);
1074 if (effects
& (NVME_CMD_EFFECTS_NIC
| NVME_CMD_EFFECTS_NCC
))
1075 nvme_queue_scan(ctrl
);
1078 static int nvme_user_cmd(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1079 struct nvme_passthru_cmd __user
*ucmd
)
1081 struct nvme_passthru_cmd cmd
;
1082 struct nvme_command c
;
1083 unsigned timeout
= 0;
1087 if (!capable(CAP_SYS_ADMIN
))
1089 if (copy_from_user(&cmd
, ucmd
, sizeof(cmd
)))
1094 memset(&c
, 0, sizeof(c
));
1095 c
.common
.opcode
= cmd
.opcode
;
1096 c
.common
.flags
= cmd
.flags
;
1097 c
.common
.nsid
= cpu_to_le32(cmd
.nsid
);
1098 c
.common
.cdw2
[0] = cpu_to_le32(cmd
.cdw2
);
1099 c
.common
.cdw2
[1] = cpu_to_le32(cmd
.cdw3
);
1100 c
.common
.cdw10
[0] = cpu_to_le32(cmd
.cdw10
);
1101 c
.common
.cdw10
[1] = cpu_to_le32(cmd
.cdw11
);
1102 c
.common
.cdw10
[2] = cpu_to_le32(cmd
.cdw12
);
1103 c
.common
.cdw10
[3] = cpu_to_le32(cmd
.cdw13
);
1104 c
.common
.cdw10
[4] = cpu_to_le32(cmd
.cdw14
);
1105 c
.common
.cdw10
[5] = cpu_to_le32(cmd
.cdw15
);
1108 timeout
= msecs_to_jiffies(cmd
.timeout_ms
);
1110 effects
= nvme_passthru_start(ctrl
, ns
, cmd
.opcode
);
1111 status
= nvme_submit_user_cmd(ns
? ns
->queue
: ctrl
->admin_q
, &c
,
1112 (void __user
*)(uintptr_t)cmd
.addr
, cmd
.data_len
,
1113 (void __user
*)(uintptr_t)cmd
.metadata
, cmd
.metadata
,
1114 0, &cmd
.result
, timeout
);
1115 nvme_passthru_end(ctrl
, effects
);
1118 if (put_user(cmd
.result
, &ucmd
->result
))
1125 static int nvme_ioctl(struct block_device
*bdev
, fmode_t mode
,
1126 unsigned int cmd
, unsigned long arg
)
1128 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1132 force_successful_syscall_return();
1134 case NVME_IOCTL_ADMIN_CMD
:
1135 return nvme_user_cmd(ns
->ctrl
, NULL
, (void __user
*)arg
);
1136 case NVME_IOCTL_IO_CMD
:
1137 return nvme_user_cmd(ns
->ctrl
, ns
, (void __user
*)arg
);
1138 case NVME_IOCTL_SUBMIT_IO
:
1139 return nvme_submit_io(ns
, (void __user
*)arg
);
1143 return nvme_nvm_ioctl(ns
, cmd
, arg
);
1145 if (is_sed_ioctl(cmd
))
1146 return sed_ioctl(ns
->ctrl
->opal_dev
, cmd
,
1147 (void __user
*) arg
);
1152 static int nvme_open(struct block_device
*bdev
, fmode_t mode
)
1154 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1156 if (!kref_get_unless_zero(&ns
->kref
))
1161 static void nvme_release(struct gendisk
*disk
, fmode_t mode
)
1163 nvme_put_ns(disk
->private_data
);
1166 static int nvme_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
1168 /* some standard values */
1169 geo
->heads
= 1 << 6;
1170 geo
->sectors
= 1 << 5;
1171 geo
->cylinders
= get_capacity(bdev
->bd_disk
) >> 11;
1175 #ifdef CONFIG_BLK_DEV_INTEGRITY
1176 static void nvme_init_integrity(struct gendisk
*disk
, u16 ms
, u8 pi_type
)
1178 struct blk_integrity integrity
;
1180 memset(&integrity
, 0, sizeof(integrity
));
1182 case NVME_NS_DPS_PI_TYPE3
:
1183 integrity
.profile
= &t10_pi_type3_crc
;
1184 integrity
.tag_size
= sizeof(u16
) + sizeof(u32
);
1185 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1187 case NVME_NS_DPS_PI_TYPE1
:
1188 case NVME_NS_DPS_PI_TYPE2
:
1189 integrity
.profile
= &t10_pi_type1_crc
;
1190 integrity
.tag_size
= sizeof(u16
);
1191 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1194 integrity
.profile
= NULL
;
1197 integrity
.tuple_size
= ms
;
1198 blk_integrity_register(disk
, &integrity
);
1199 blk_queue_max_integrity_segments(disk
->queue
, 1);
1202 static void nvme_init_integrity(struct gendisk
*disk
, u16 ms
, u8 pi_type
)
1205 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1207 static void nvme_set_chunk_size(struct nvme_ns
*ns
)
1209 u32 chunk_size
= (((u32
)ns
->noiob
) << (ns
->lba_shift
- 9));
1210 blk_queue_chunk_sectors(ns
->queue
, rounddown_pow_of_two(chunk_size
));
1213 static void nvme_config_discard(struct nvme_ctrl
*ctrl
,
1214 unsigned stream_alignment
, struct request_queue
*queue
)
1216 u32 size
= queue_logical_block_size(queue
);
1218 if (stream_alignment
)
1219 size
*= stream_alignment
;
1221 BUILD_BUG_ON(PAGE_SIZE
/ sizeof(struct nvme_dsm_range
) <
1222 NVME_DSM_MAX_RANGES
);
1224 queue
->limits
.discard_alignment
= size
;
1225 queue
->limits
.discard_granularity
= size
;
1227 blk_queue_max_discard_sectors(queue
, UINT_MAX
);
1228 blk_queue_max_discard_segments(queue
, NVME_DSM_MAX_RANGES
);
1229 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, queue
);
1231 if (ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
1232 blk_queue_max_write_zeroes_sectors(queue
, UINT_MAX
);
1235 static void nvme_report_ns_ids(struct nvme_ctrl
*ctrl
, unsigned int nsid
,
1236 struct nvme_id_ns
*id
, u8
*eui64
, u8
*nguid
, uuid_t
*uuid
)
1238 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
1239 memcpy(eui64
, id
->eui64
, sizeof(id
->eui64
));
1240 if (ctrl
->vs
>= NVME_VS(1, 2, 0))
1241 memcpy(nguid
, id
->nguid
, sizeof(id
->nguid
));
1242 if (ctrl
->vs
>= NVME_VS(1, 3, 0)) {
		/*
		 * Don't treat an error as fatal, as we potentially already
		 * have an NGUID or EUI-64.
		 */
1246 if (nvme_identify_ns_descs(ctrl
, nsid
, eui64
, nguid
, uuid
))
1247 dev_warn(ctrl
->device
,
1248 "%s: Identify Descriptors failed\n", __func__
);
1252 static void nvme_update_disk_info(struct gendisk
*disk
,
1253 struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
1255 sector_t capacity
= le64_to_cpup(&id
->nsze
) << (ns
->lba_shift
- 9);
1256 unsigned stream_alignment
= 0;
1258 if (ns
->ctrl
->nr_streams
&& ns
->sws
&& ns
->sgs
)
1259 stream_alignment
= ns
->sws
* ns
->sgs
;
1261 blk_mq_freeze_queue(disk
->queue
);
1262 blk_integrity_unregister(disk
);
1264 blk_queue_logical_block_size(disk
->queue
, 1 << ns
->lba_shift
);
1265 if (ns
->ms
&& !ns
->ext
&&
1266 (ns
->ctrl
->ops
->flags
& NVME_F_METADATA_SUPPORTED
))
1267 nvme_init_integrity(disk
, ns
->ms
, ns
->pi_type
);
1268 if (ns
->ms
&& !nvme_ns_has_pi(ns
) && !blk_get_integrity(disk
))
1270 set_capacity(disk
, capacity
);
1272 if (ns
->ctrl
->oncs
& NVME_CTRL_ONCS_DSM
)
1273 nvme_config_discard(ns
->ctrl
, stream_alignment
, disk
->queue
);
1274 blk_mq_unfreeze_queue(disk
->queue
);
1277 static void __nvme_revalidate_disk(struct gendisk
*disk
, struct nvme_id_ns
*id
)
1279 struct nvme_ns
*ns
= disk
->private_data
;
	/*
	 * If Identify Namespace failed, use a default 512 byte block size so
	 * the block layer can use the device before failing reads/writes for
	 * zero capacity.
	 */
1285 ns
->lba_shift
= id
->lbaf
[id
->flbas
& NVME_NS_FLBAS_LBA_MASK
].ds
;
1286 if (ns
->lba_shift
== 0)
1288 ns
->noiob
= le16_to_cpu(id
->noiob
);
1289 ns
->ext
= ns
->ms
&& (id
->flbas
& NVME_NS_FLBAS_META_EXT
);
1290 ns
->ms
= le16_to_cpu(id
->lbaf
[id
->flbas
& NVME_NS_FLBAS_LBA_MASK
].ms
);
	/* the PI implementation requires metadata equal to the T10 PI tuple size */
1292 if (ns
->ms
== sizeof(struct t10_pi_tuple
))
1293 ns
->pi_type
= id
->dps
& NVME_NS_DPS_PI_MASK
;
1298 nvme_set_chunk_size(ns
);
1299 nvme_update_disk_info(disk
, ns
, id
);
1302 static int nvme_revalidate_disk(struct gendisk
*disk
)
1304 struct nvme_ns
*ns
= disk
->private_data
;
1305 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1306 struct nvme_id_ns
*id
;
1307 u8 eui64
[8] = { 0 }, nguid
[16] = { 0 };
1308 uuid_t uuid
= uuid_null
;
1311 if (test_bit(NVME_NS_DEAD
, &ns
->flags
)) {
1312 set_capacity(disk
, 0);
1316 id
= nvme_identify_ns(ctrl
, ns
->ns_id
);
1320 if (id
->ncap
== 0) {
1325 nvme_report_ns_ids(ctrl
, ns
->ns_id
, id
, eui64
, nguid
, &uuid
);
1326 if (!uuid_equal(&ns
->uuid
, &uuid
) ||
1327 memcmp(&ns
->nguid
, &nguid
, sizeof(ns
->nguid
)) ||
1328 memcmp(&ns
->eui
, &eui64
, sizeof(ns
->eui
))) {
1329 dev_err(ctrl
->device
,
1330 "identifiers changed for nsid %d\n", ns
->ns_id
);
1339 static char nvme_pr_type(enum pr_type type
)
1342 case PR_WRITE_EXCLUSIVE
:
1344 case PR_EXCLUSIVE_ACCESS
:
1346 case PR_WRITE_EXCLUSIVE_REG_ONLY
:
1348 case PR_EXCLUSIVE_ACCESS_REG_ONLY
:
1350 case PR_WRITE_EXCLUSIVE_ALL_REGS
:
1352 case PR_EXCLUSIVE_ACCESS_ALL_REGS
:
1359 static int nvme_pr_command(struct block_device
*bdev
, u32 cdw10
,
1360 u64 key
, u64 sa_key
, u8 op
)
1362 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1363 struct nvme_command c
;
1364 u8 data
[16] = { 0, };
1366 put_unaligned_le64(key
, &data
[0]);
1367 put_unaligned_le64(sa_key
, &data
[8]);
1369 memset(&c
, 0, sizeof(c
));
1370 c
.common
.opcode
= op
;
1371 c
.common
.nsid
= cpu_to_le32(ns
->ns_id
);
1372 c
.common
.cdw10
[0] = cpu_to_le32(cdw10
);
1374 return nvme_submit_sync_cmd(ns
->queue
, &c
, data
, 16);
1377 static int nvme_pr_register(struct block_device
*bdev
, u64 old
,
1378 u64
new, unsigned flags
)
1382 if (flags
& ~PR_FL_IGNORE_KEY
)
1385 cdw10
= old
? 2 : 0;
1386 cdw10
|= (flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0;
1387 cdw10
|= (1 << 30) | (1 << 31); /* PTPL=1 */
1388 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_register
);
1391 static int nvme_pr_reserve(struct block_device
*bdev
, u64 key
,
1392 enum pr_type type
, unsigned flags
)
1396 if (flags
& ~PR_FL_IGNORE_KEY
)
1399 cdw10
= nvme_pr_type(type
) << 8;
1400 cdw10
|= ((flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0);
1401 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_acquire
);
1404 static int nvme_pr_preempt(struct block_device
*bdev
, u64 old
, u64
new,
1405 enum pr_type type
, bool abort
)
1407 u32 cdw10
= nvme_pr_type(type
) << 8 | abort
? 2 : 1;
1408 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_acquire
);
1411 static int nvme_pr_clear(struct block_device
*bdev
, u64 key
)
1413 u32 cdw10
= 1 | (key
? 1 << 3 : 0);
1414 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_register
);
1417 static int nvme_pr_release(struct block_device
*bdev
, u64 key
, enum pr_type type
)
1419 u32 cdw10
= nvme_pr_type(type
) << 8 | key
? 1 << 3 : 0;
1420 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_release
);
1423 static const struct pr_ops nvme_pr_ops
= {
1424 .pr_register
= nvme_pr_register
,
1425 .pr_reserve
= nvme_pr_reserve
,
1426 .pr_release
= nvme_pr_release
,
1427 .pr_preempt
= nvme_pr_preempt
,
1428 .pr_clear
= nvme_pr_clear
,
1431 #ifdef CONFIG_BLK_SED_OPAL
1432 int nvme_sec_submit(void *data
, u16 spsp
, u8 secp
, void *buffer
, size_t len
,
1435 struct nvme_ctrl
*ctrl
= data
;
1436 struct nvme_command cmd
;
1438 memset(&cmd
, 0, sizeof(cmd
));
1440 cmd
.common
.opcode
= nvme_admin_security_send
;
1442 cmd
.common
.opcode
= nvme_admin_security_recv
;
1443 cmd
.common
.nsid
= 0;
1444 cmd
.common
.cdw10
[0] = cpu_to_le32(((u32
)secp
) << 24 | ((u32
)spsp
) << 8);
1445 cmd
.common
.cdw10
[1] = cpu_to_le32(len
);
1447 return __nvme_submit_sync_cmd(ctrl
->admin_q
, &cmd
, NULL
, buffer
, len
,
1448 ADMIN_TIMEOUT
, NVME_QID_ANY
, 1, 0);
1450 EXPORT_SYMBOL_GPL(nvme_sec_submit
);
1451 #endif /* CONFIG_BLK_SED_OPAL */
1453 static const struct block_device_operations nvme_fops
= {
1454 .owner
= THIS_MODULE
,
1455 .ioctl
= nvme_ioctl
,
1456 .compat_ioctl
= nvme_ioctl
,
1458 .release
= nvme_release
,
1459 .getgeo
= nvme_getgeo
,
1460 .revalidate_disk
= nvme_revalidate_disk
,
1461 .pr_ops
= &nvme_pr_ops
,
1464 static int nvme_wait_ready(struct nvme_ctrl
*ctrl
, u64 cap
, bool enabled
)
1466 unsigned long timeout
=
1467 ((NVME_CAP_TIMEOUT(cap
) + 1) * HZ
/ 2) + jiffies
;
1468 u32 csts
, bit
= enabled
? NVME_CSTS_RDY
: 0;
1471 while ((ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
)) == 0) {
1474 if ((csts
& NVME_CSTS_RDY
) == bit
)
1478 if (fatal_signal_pending(current
))
1480 if (time_after(jiffies
, timeout
)) {
1481 dev_err(ctrl
->device
,
1482 "Device not ready; aborting %s\n", enabled
?
1483 "initialisation" : "reset");
1492 * If the device has been passed off to us in an enabled state, just clear
1493 * the enabled bit. The spec says we should set the 'shutdown notification
1494 * bits', but doing so may cause the device to complete commands to the
1495 * admin queue ... and we don't know what memory that might be pointing at!
1497 int nvme_disable_ctrl(struct nvme_ctrl
*ctrl
, u64 cap
)
1501 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
1502 ctrl
->ctrl_config
&= ~NVME_CC_ENABLE
;
1504 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1508 if (ctrl
->quirks
& NVME_QUIRK_DELAY_BEFORE_CHK_RDY
)
1509 msleep(NVME_QUIRK_DELAY_AMOUNT
);
1511 return nvme_wait_ready(ctrl
, cap
, false);
1513 EXPORT_SYMBOL_GPL(nvme_disable_ctrl
);
1515 int nvme_enable_ctrl(struct nvme_ctrl
*ctrl
, u64 cap
)
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
1522 unsigned dev_page_min
= NVME_CAP_MPSMIN(cap
) + 12, page_shift
= 12;
1525 if (page_shift
< dev_page_min
) {
1526 dev_err(ctrl
->device
,
1527 "Minimum device page size %u too large for host (%u)\n",
1528 1 << dev_page_min
, 1 << page_shift
);
1532 ctrl
->page_size
= 1 << page_shift
;
1534 ctrl
->ctrl_config
= NVME_CC_CSS_NVM
;
1535 ctrl
->ctrl_config
|= (page_shift
- 12) << NVME_CC_MPS_SHIFT
;
1536 ctrl
->ctrl_config
|= NVME_CC_AMS_RR
| NVME_CC_SHN_NONE
;
1537 ctrl
->ctrl_config
|= NVME_CC_IOSQES
| NVME_CC_IOCQES
;
1538 ctrl
->ctrl_config
|= NVME_CC_ENABLE
;
1540 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1543 return nvme_wait_ready(ctrl
, cap
, true);
1545 EXPORT_SYMBOL_GPL(nvme_enable_ctrl
);
1547 int nvme_shutdown_ctrl(struct nvme_ctrl
*ctrl
)
1549 unsigned long timeout
= jiffies
+ (ctrl
->shutdown_timeout
* HZ
);
1553 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
1554 ctrl
->ctrl_config
|= NVME_CC_SHN_NORMAL
;
1556 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1560 while ((ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
)) == 0) {
1561 if ((csts
& NVME_CSTS_SHST_MASK
) == NVME_CSTS_SHST_CMPLT
)
1565 if (fatal_signal_pending(current
))
1567 if (time_after(jiffies
, timeout
)) {
1568 dev_err(ctrl
->device
,
1569 "Device shutdown incomplete; abort shutdown\n");
1576 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl
);
1578 static void nvme_set_queue_limits(struct nvme_ctrl
*ctrl
,
1579 struct request_queue
*q
)
1583 if (ctrl
->max_hw_sectors
) {
1585 (ctrl
->max_hw_sectors
/ (ctrl
->page_size
>> 9)) + 1;
1587 blk_queue_max_hw_sectors(q
, ctrl
->max_hw_sectors
);
1588 blk_queue_max_segments(q
, min_t(u32
, max_segments
, USHRT_MAX
));
1590 if (ctrl
->quirks
& NVME_QUIRK_STRIPE_SIZE
)
1591 blk_queue_chunk_sectors(q
, ctrl
->max_hw_sectors
);
1592 blk_queue_virt_boundary(q
, ctrl
->page_size
- 1);
1593 if (ctrl
->vwc
& NVME_CTRL_VWC_PRESENT
)
1595 blk_queue_write_cache(q
, vwc
, vwc
);
1598 static int nvme_configure_timestamp(struct nvme_ctrl
*ctrl
)
1603 if (!(ctrl
->oncs
& NVME_CTRL_ONCS_TIMESTAMP
))
1606 ts
= cpu_to_le64(ktime_to_ms(ktime_get_real()));
1607 ret
= nvme_set_features(ctrl
, NVME_FEAT_TIMESTAMP
, 0, &ts
, sizeof(ts
),
1610 dev_warn_once(ctrl
->device
,
1611 "could not set timestamp (%d)\n", ret
);
1615 static int nvme_configure_apst(struct nvme_ctrl
*ctrl
)
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */
1634 struct nvme_feat_auto_pst
*table
;
1640 * If APST isn't supported or if we haven't been initialized yet,
1641 * then don't do anything.
1646 if (ctrl
->npss
> 31) {
1647 dev_warn(ctrl
->device
, "NPSS is invalid; not using APST\n");
1651 table
= kzalloc(sizeof(*table
), GFP_KERNEL
);
1655 if (!ctrl
->apst_enabled
|| ctrl
->ps_max_latency_us
== 0) {
1656 /* Turn off APST. */
1658 dev_dbg(ctrl
->device
, "APST disabled\n");
1660 __le64 target
= cpu_to_le64(0);
		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
1669 for (state
= (int)ctrl
->npss
; state
>= 0; state
--) {
1670 u64 total_latency_us
, exit_latency_us
, transition_ms
;
1673 table
->entries
[state
] = target
;
1676 * Don't allow transitions to the deepest state
1677 * if it's quirked off.
1679 if (state
== ctrl
->npss
&&
1680 (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
))
1684 * Is this state a useful non-operational state for
1685 * higher-power states to autonomously transition to?
1687 if (!(ctrl
->psd
[state
].flags
&
1688 NVME_PS_FLAGS_NON_OP_STATE
))
1692 (u64
)le32_to_cpu(ctrl
->psd
[state
].exit_lat
);
1693 if (exit_latency_us
> ctrl
->ps_max_latency_us
)
1698 le32_to_cpu(ctrl
->psd
[state
].entry_lat
);
1701 * This state is good. Use it as the APST idle
1702 * target for higher power states.
1704 transition_ms
= total_latency_us
+ 19;
1705 do_div(transition_ms
, 20);
1706 if (transition_ms
> (1 << 24) - 1)
1707 transition_ms
= (1 << 24) - 1;
1709 target
= cpu_to_le64((state
<< 3) |
1710 (transition_ms
<< 8));
1715 if (total_latency_us
> max_lat_us
)
1716 max_lat_us
= total_latency_us
;
1722 dev_dbg(ctrl
->device
, "APST enabled but no non-operational states are available\n");
1724 dev_dbg(ctrl
->device
, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
1725 max_ps
, max_lat_us
, (int)sizeof(*table
), table
);
1729 ret
= nvme_set_features(ctrl
, NVME_FEAT_AUTO_PST
, apste
,
1730 table
, sizeof(*table
), NULL
);
1732 dev_err(ctrl
->device
, "failed to set APST feature (%d)\n", ret
);
1738 static void nvme_set_latency_tolerance(struct device
*dev
, s32 val
)
1740 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
1744 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT
:
1745 case PM_QOS_LATENCY_ANY
:
1753 if (ctrl
->ps_max_latency_us
!= latency
) {
1754 ctrl
->ps_max_latency_us
= latency
;
1755 nvme_configure_apst(ctrl
);
1759 struct nvme_core_quirk_entry
{
1761 * NVMe model and firmware strings are padded with spaces. For
1762 * simplicity, strings in the quirk table are padded with NULLs
1768 unsigned long quirks
;
1771 static const struct nvme_core_quirk_entry core_quirks
[] = {
1774 * This Toshiba device seems to die using any APST states. See:
1775 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
1778 .mn
= "THNSF5256GPUK TOSHIBA",
1779 .quirks
= NVME_QUIRK_NO_APST
,
1783 /* match is null-terminated but idstr is space-padded. */
1784 static bool string_matches(const char *idstr
, const char *match
, size_t len
)
1791 matchlen
= strlen(match
);
1792 WARN_ON_ONCE(matchlen
> len
);
1794 if (memcmp(idstr
, match
, matchlen
))
1797 for (; matchlen
< len
; matchlen
++)
1798 if (idstr
[matchlen
] != ' ')
1804 static bool quirk_matches(const struct nvme_id_ctrl
*id
,
1805 const struct nvme_core_quirk_entry
*q
)
1807 return q
->vid
== le16_to_cpu(id
->vid
) &&
1808 string_matches(id
->mn
, q
->mn
, sizeof(id
->mn
)) &&
1809 string_matches(id
->fr
, q
->fr
, sizeof(id
->fr
));
1812 static void nvme_init_subnqn(struct nvme_subsystem
*subsys
, struct nvme_ctrl
*ctrl
,
1813 struct nvme_id_ctrl
*id
)
1818 nqnlen
= strnlen(id
->subnqn
, NVMF_NQN_SIZE
);
1819 if (nqnlen
> 0 && nqnlen
< NVMF_NQN_SIZE
) {
1820 strncpy(subsys
->subnqn
, id
->subnqn
, NVMF_NQN_SIZE
);
1824 if (ctrl
->vs
>= NVME_VS(1, 2, 1))
1825 dev_warn(ctrl
->device
, "missing or invalid SUBNQN field.\n");
1827 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
1828 off
= snprintf(subsys
->subnqn
, NVMF_NQN_SIZE
,
1829 "nqn.2014.08.org.nvmexpress:%4x%4x",
1830 le16_to_cpu(id
->vid
), le16_to_cpu(id
->ssvid
));
1831 memcpy(subsys
->subnqn
+ off
, id
->sn
, sizeof(id
->sn
));
1832 off
+= sizeof(id
->sn
);
1833 memcpy(subsys
->subnqn
+ off
, id
->mn
, sizeof(id
->mn
));
1834 off
+= sizeof(id
->mn
);
1835 memset(subsys
->subnqn
+ off
, 0, sizeof(subsys
->subnqn
) - off
);
1838 static void __nvme_release_subsystem(struct nvme_subsystem
*subsys
)
1840 ida_simple_remove(&nvme_subsystems_ida
, subsys
->instance
);
1844 static void nvme_release_subsystem(struct device
*dev
)
1846 __nvme_release_subsystem(container_of(dev
, struct nvme_subsystem
, dev
));
1849 static void nvme_destroy_subsystem(struct kref
*ref
)
1851 struct nvme_subsystem
*subsys
=
1852 container_of(ref
, struct nvme_subsystem
, ref
);
1854 mutex_lock(&nvme_subsystems_lock
);
1855 list_del(&subsys
->entry
);
1856 mutex_unlock(&nvme_subsystems_lock
);
1858 device_del(&subsys
->dev
);
1859 put_device(&subsys
->dev
);
1862 static void nvme_put_subsystem(struct nvme_subsystem
*subsys
)
1864 kref_put(&subsys
->ref
, nvme_destroy_subsystem
);
1867 static struct nvme_subsystem
*__nvme_find_get_subsystem(const char *subsysnqn
)
1869 struct nvme_subsystem
*subsys
;
1871 lockdep_assert_held(&nvme_subsystems_lock
);
1873 list_for_each_entry(subsys
, &nvme_subsystems
, entry
) {
1874 if (strcmp(subsys
->subnqn
, subsysnqn
))
1876 if (!kref_get_unless_zero(&subsys
->ref
))
1884 static int nvme_init_subsystem(struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
1886 struct nvme_subsystem
*subsys
, *found
;
1889 subsys
= kzalloc(sizeof(*subsys
), GFP_KERNEL
);
1892 ret
= ida_simple_get(&nvme_subsystems_ida
, 0, 0, GFP_KERNEL
);
1897 subsys
->instance
= ret
;
1898 mutex_init(&subsys
->lock
);
1899 kref_init(&subsys
->ref
);
1900 INIT_LIST_HEAD(&subsys
->ctrls
);
1901 nvme_init_subnqn(subsys
, ctrl
, id
);
1902 memcpy(subsys
->serial
, id
->sn
, sizeof(subsys
->serial
));
1903 memcpy(subsys
->model
, id
->mn
, sizeof(subsys
->model
));
1904 memcpy(subsys
->firmware_rev
, id
->fr
, sizeof(subsys
->firmware_rev
));
1905 subsys
->vendor_id
= le16_to_cpu(id
->vid
);
1906 subsys
->cmic
= id
->cmic
;
1908 subsys
->dev
.class = nvme_subsys_class
;
1909 subsys
->dev
.release
= nvme_release_subsystem
;
1910 dev_set_name(&subsys
->dev
, "nvme-subsys%d", subsys
->instance
);
1911 device_initialize(&subsys
->dev
);
1913 mutex_lock(&nvme_subsystems_lock
);
1914 found
= __nvme_find_get_subsystem(subsys
->subnqn
);
1917 * Verify that the subsystem actually supports multiple
1918 * controllers, else bail out.
1920 if (!(id
->cmic
& (1 << 1))) {
1921 dev_err(ctrl
->device
,
1922 "ignoring ctrl due to duplicate subnqn (%s).\n",
1924 nvme_put_subsystem(found
);
1929 __nvme_release_subsystem(subsys
);
1932 ret
= device_add(&subsys
->dev
);
1934 dev_err(ctrl
->device
,
1935 "failed to register subsystem device.\n");
1938 list_add_tail(&subsys
->entry
, &nvme_subsystems
);
1941 ctrl
->subsys
= subsys
;
1942 mutex_unlock(&nvme_subsystems_lock
);
1944 if (sysfs_create_link(&subsys
->dev
.kobj
, &ctrl
->device
->kobj
,
1945 dev_name(ctrl
->device
))) {
1946 dev_err(ctrl
->device
,
1947 "failed to create sysfs link from subsystem.\n");
1948 /* the transport driver will eventually put the subsystem */
1952 mutex_lock(&subsys
->lock
);
1953 list_add_tail(&ctrl
->subsys_entry
, &subsys
->ctrls
);
1954 mutex_unlock(&subsys
->lock
);
1959 mutex_unlock(&nvme_subsystems_lock
);
1960 put_device(&subsys
->dev
);
1964 static int nvme_get_log(struct nvme_ctrl
*ctrl
, u8 log_page
, void *log
,
1967 struct nvme_command c
= { };
1969 c
.common
.opcode
= nvme_admin_get_log_page
;
1970 c
.common
.nsid
= cpu_to_le32(NVME_NSID_ALL
);
1971 c
.common
.cdw10
[0] = nvme_get_log_dw10(log_page
, size
);
1973 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, log
, size
);
1976 static int nvme_get_effects_log(struct nvme_ctrl
*ctrl
)
1981 ctrl
->effects
= kzalloc(sizeof(*ctrl
->effects
), GFP_KERNEL
);
1986 ret
= nvme_get_log(ctrl
, NVME_LOG_CMD_EFFECTS
, ctrl
->effects
,
1987 sizeof(*ctrl
->effects
));
1989 kfree(ctrl
->effects
);
1990 ctrl
->effects
= NULL
;
1996 * Initialize the cached copies of the Identify data and various controller
1997 * register in our nvme_ctrl structure. This should be called as soon as
1998 * the admin queue is fully up and running.
2000 int nvme_init_identify(struct nvme_ctrl
*ctrl
)
2002 struct nvme_id_ctrl
*id
;
2004 int ret
, page_shift
;
2006 bool prev_apst_enabled
;
2008 ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_VS
, &ctrl
->vs
);
2010 dev_err(ctrl
->device
, "Reading VS failed (%d)\n", ret
);
2014 ret
= ctrl
->ops
->reg_read64(ctrl
, NVME_REG_CAP
, &cap
);
2016 dev_err(ctrl
->device
, "Reading CAP failed (%d)\n", ret
);
2019 page_shift
= NVME_CAP_MPSMIN(cap
) + 12;
2021 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
2022 ctrl
->subsystem
= NVME_CAP_NSSRC(cap
);
2024 ret
= nvme_identify_ctrl(ctrl
, &id
);
2026 dev_err(ctrl
->device
, "Identify Controller failed (%d)\n", ret
);
2030 if (id
->lpa
& NVME_CTRL_LPA_CMD_EFFECTS_LOG
) {
2031 ret
= nvme_get_effects_log(ctrl
);
2036 if (!ctrl
->identified
) {
2039 ret
= nvme_init_subsystem(ctrl
, id
);
2044 * Check for quirks. Quirk can depend on firmware version,
2045 * so, in principle, the set of quirks present can change
2046 * across a reset. As a possible future enhancement, we
2047 * could re-scan for quirks every time we reinitialize
2048 * the device, but we'd have to make sure that the driver
2049 * behaves intelligently if the quirks change.
2051 for (i
= 0; i
< ARRAY_SIZE(core_quirks
); i
++) {
2052 if (quirk_matches(id
, &core_quirks
[i
]))
2053 ctrl
->quirks
|= core_quirks
[i
].quirks
;
2057 if (force_apst
&& (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
)) {
2058 dev_warn(ctrl
->device
, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2059 ctrl
->quirks
&= ~NVME_QUIRK_NO_DEEPEST_PS
;
2062 ctrl
->oacs
= le16_to_cpu(id
->oacs
);
2063 ctrl
->oncs
= le16_to_cpup(&id
->oncs
);
2064 atomic_set(&ctrl
->abort_limit
, id
->acl
+ 1);
2065 ctrl
->vwc
= id
->vwc
;
2066 ctrl
->cntlid
= le16_to_cpup(&id
->cntlid
);
2068 max_hw_sectors
= 1 << (id
->mdts
+ page_shift
- 9);
2070 max_hw_sectors
= UINT_MAX
;
2071 ctrl
->max_hw_sectors
=
2072 min_not_zero(ctrl
->max_hw_sectors
, max_hw_sectors
);
2074 nvme_set_queue_limits(ctrl
, ctrl
->admin_q
);
2075 ctrl
->sgls
= le32_to_cpu(id
->sgls
);
2076 ctrl
->kas
= le16_to_cpu(id
->kas
);
2080 u32 transition_time
= le32_to_cpu(id
->rtd3e
) / 1000000;
2082 ctrl
->shutdown_timeout
= clamp_t(unsigned int, transition_time
,
2083 shutdown_timeout
, 60);
2085 if (ctrl
->shutdown_timeout
!= shutdown_timeout
)
2086 dev_warn(ctrl
->device
,
2087 "Shutdown timeout set to %u seconds\n",
2088 ctrl
->shutdown_timeout
);
2090 ctrl
->shutdown_timeout
= shutdown_timeout
;
2092 ctrl
->npss
= id
->npss
;
2093 ctrl
->apsta
= id
->apsta
;
2094 prev_apst_enabled
= ctrl
->apst_enabled
;
2095 if (ctrl
->quirks
& NVME_QUIRK_NO_APST
) {
2096 if (force_apst
&& id
->apsta
) {
2097 dev_warn(ctrl
->device
, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2098 ctrl
->apst_enabled
= true;
2100 ctrl
->apst_enabled
= false;
2103 ctrl
->apst_enabled
= id
->apsta
;
2105 memcpy(ctrl
->psd
, id
->psd
, sizeof(ctrl
->psd
));
2107 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
2108 ctrl
->icdoff
= le16_to_cpu(id
->icdoff
);
2109 ctrl
->ioccsz
= le32_to_cpu(id
->ioccsz
);
2110 ctrl
->iorcsz
= le32_to_cpu(id
->iorcsz
);
2111 ctrl
->maxcmd
= le16_to_cpu(id
->maxcmd
);
2114 * In fabrics we need to verify the cntlid matches the
2117 if (ctrl
->cntlid
!= le16_to_cpu(id
->cntlid
)) {
2122 if (!ctrl
->opts
->discovery_nqn
&& !ctrl
->kas
) {
2123 dev_err(ctrl
->device
,
2124 "keep-alive support is mandatory for fabrics\n");
2129 ctrl
->cntlid
= le16_to_cpu(id
->cntlid
);
2130 ctrl
->hmpre
= le32_to_cpu(id
->hmpre
);
2131 ctrl
->hmmin
= le32_to_cpu(id
->hmmin
);
2132 ctrl
->hmminds
= le32_to_cpu(id
->hmminds
);
2133 ctrl
->hmmaxd
= le16_to_cpu(id
->hmmaxd
);
2138 if (ctrl
->apst_enabled
&& !prev_apst_enabled
)
2139 dev_pm_qos_expose_latency_tolerance(ctrl
->device
);
2140 else if (!ctrl
->apst_enabled
&& prev_apst_enabled
)
2141 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
2143 ret
= nvme_configure_apst(ctrl
);
2147 ret
= nvme_configure_timestamp(ctrl
);
2151 ret
= nvme_configure_directives(ctrl
);
2155 ctrl
->identified
= true;
2163 EXPORT_SYMBOL_GPL(nvme_init_identify
);
2165 static int nvme_dev_open(struct inode
*inode
, struct file
*file
)
2167 struct nvme_ctrl
*ctrl
=
2168 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
2170 if (ctrl
->state
!= NVME_CTRL_LIVE
)
2171 return -EWOULDBLOCK
;
2172 file
->private_data
= ctrl
;
2176 static int nvme_dev_user_cmd(struct nvme_ctrl
*ctrl
, void __user
*argp
)
2181 mutex_lock(&ctrl
->namespaces_mutex
);
2182 if (list_empty(&ctrl
->namespaces
)) {
2187 ns
= list_first_entry(&ctrl
->namespaces
, struct nvme_ns
, list
);
2188 if (ns
!= list_last_entry(&ctrl
->namespaces
, struct nvme_ns
, list
)) {
2189 dev_warn(ctrl
->device
,
2190 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
2195 dev_warn(ctrl
->device
,
2196 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
2197 kref_get(&ns
->kref
);
2198 mutex_unlock(&ctrl
->namespaces_mutex
);
2200 ret
= nvme_user_cmd(ctrl
, ns
, argp
);
2205 mutex_unlock(&ctrl
->namespaces_mutex
);
2209 static long nvme_dev_ioctl(struct file
*file
, unsigned int cmd
,
2212 struct nvme_ctrl
*ctrl
= file
->private_data
;
2213 void __user
*argp
= (void __user
*)arg
;
2216 case NVME_IOCTL_ADMIN_CMD
:
2217 return nvme_user_cmd(ctrl
, NULL
, argp
);
2218 case NVME_IOCTL_IO_CMD
:
2219 return nvme_dev_user_cmd(ctrl
, argp
);
2220 case NVME_IOCTL_RESET
:
2221 dev_warn(ctrl
->device
, "resetting controller\n");
2222 return nvme_reset_ctrl_sync(ctrl
);
2223 case NVME_IOCTL_SUBSYS_RESET
:
2224 return nvme_reset_subsystem(ctrl
);
2225 case NVME_IOCTL_RESCAN
:
2226 nvme_queue_scan(ctrl
);
2233 static const struct file_operations nvme_dev_fops
= {
2234 .owner
= THIS_MODULE
,
2235 .open
= nvme_dev_open
,
2236 .unlocked_ioctl
= nvme_dev_ioctl
,
2237 .compat_ioctl
= nvme_dev_ioctl
,
2240 static ssize_t
nvme_sysfs_reset(struct device
*dev
,
2241 struct device_attribute
*attr
, const char *buf
,
2244 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2247 ret
= nvme_reset_ctrl_sync(ctrl
);
2252 static DEVICE_ATTR(reset_controller
, S_IWUSR
, NULL
, nvme_sysfs_reset
);
2254 static ssize_t
nvme_sysfs_rescan(struct device
*dev
,
2255 struct device_attribute
*attr
, const char *buf
,
2258 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2260 nvme_queue_scan(ctrl
);
2263 static DEVICE_ATTR(rescan_controller
, S_IWUSR
, NULL
, nvme_sysfs_rescan
);
2265 static ssize_t
wwid_show(struct device
*dev
, struct device_attribute
*attr
,
2268 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2269 struct nvme_subsystem
*subsys
= ns
->ctrl
->subsys
;
2270 int serial_len
= sizeof(subsys
->serial
);
2271 int model_len
= sizeof(subsys
->model
);
2273 if (!uuid_is_null(&ns
->uuid
))
2274 return sprintf(buf
, "uuid.%pU\n", &ns
->uuid
);
2276 if (memchr_inv(ns
->nguid
, 0, sizeof(ns
->nguid
)))
2277 return sprintf(buf
, "eui.%16phN\n", ns
->nguid
);
2279 if (memchr_inv(ns
->eui
, 0, sizeof(ns
->eui
)))
2280 return sprintf(buf
, "eui.%8phN\n", ns
->eui
);
2282 while (serial_len
> 0 && (subsys
->serial
[serial_len
- 1] == ' ' ||
2283 subsys
->serial
[serial_len
- 1] == '\0'))
2285 while (model_len
> 0 && (subsys
->model
[model_len
- 1] == ' ' ||
2286 subsys
->model
[model_len
- 1] == '\0'))
2289 return sprintf(buf
, "nvme.%04x-%*phN-%*phN-%08x\n", subsys
->vendor_id
,
2290 serial_len
, subsys
->serial
, model_len
, subsys
->model
,
2293 static DEVICE_ATTR(wwid
, S_IRUGO
, wwid_show
, NULL
);
2295 static ssize_t
nguid_show(struct device
*dev
, struct device_attribute
*attr
,
2298 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2299 return sprintf(buf
, "%pU\n", ns
->nguid
);
2301 static DEVICE_ATTR(nguid
, S_IRUGO
, nguid_show
, NULL
);
2303 static ssize_t
uuid_show(struct device
*dev
, struct device_attribute
*attr
,
2306 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2308 /* For backward compatibility expose the NGUID to userspace if
2309 * we have no UUID set
2311 if (uuid_is_null(&ns
->uuid
)) {
2312 printk_ratelimited(KERN_WARNING
2313 "No UUID available providing old NGUID\n");
2314 return sprintf(buf
, "%pU\n", ns
->nguid
);
2316 return sprintf(buf
, "%pU\n", &ns
->uuid
);
2318 static DEVICE_ATTR(uuid
, S_IRUGO
, uuid_show
, NULL
);
2320 static ssize_t
eui_show(struct device
*dev
, struct device_attribute
*attr
,
2323 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2324 return sprintf(buf
, "%8ph\n", ns
->eui
);
2326 static DEVICE_ATTR(eui
, S_IRUGO
, eui_show
, NULL
);
2328 static ssize_t
nsid_show(struct device
*dev
, struct device_attribute
*attr
,
2331 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2332 return sprintf(buf
, "%d\n", ns
->ns_id
);
2334 static DEVICE_ATTR(nsid
, S_IRUGO
, nsid_show
, NULL
);
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ns->uuid) ||
		    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};
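/*
 * The is_visible callback above hides identifier attributes the namespace did
 * not report: "uuid" is hidden when the UUID is null or the NGUID is all
 * zeroes, and "nguid"/"eui" are hidden when the respective identifier is all
 * zeroes. "wwid" and "nsid" are always exposed.
 */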
#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sprintf(buf, "%.*s\n", \
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field) \
static ssize_t field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sprintf(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_RECONNECTING] = "reconnecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	NULL,
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
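/*
 * The controller's namespace list is kept sorted by namespace ID; ns_cmp()
 * below is the comparator handed to list_sort() by the scan work.
 */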
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}
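/*
 * Streams note: SWS (stream write size) is reported in logical blocks and SGS
 * (stream granularity size) in units of SWS, so the limits set above work out
 * to io_min = block_size * sws and io_opt = block_size * sws * sgs.
 */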
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);
	nvme_setup_streams_ns(ctrl, ns);

	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	id = nvme_identify_ns(ctrl, nsid);
	if (!id)
		goto out_free_queue;

	if (id->ncap == 0)
		goto out_free_id;

	nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		if (nvme_nvm_register(ns, disk_name, node)) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_free_id;
		}
	}

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_id;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	nvme_get_ctrl(ctrl);

	kfree(id);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_id:
	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
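/*
 * Namespace scanning: controllers reporting NVMe 1.1 or later and not flagged
 * with NVME_QUIRK_IDENTIFY_CNS are scanned through the Identify namespace
 * list (1024 IDs per 4K page); otherwise every NSID from 1 to the reported
 * namespace count is probed sequentially.
 */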
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates that the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);
	ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log)))
		dev_warn(ctrl->device,
				"Get FW SLOT INFO log error\n");
	kfree(log);
}
static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_reset_ctrl(ctrl);
			break;
		}
		msleep(100);
	}

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (result & 0x7) {
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		ctrl->aen_result = result;
		break;
	default:
		break;
	}

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		queue_work(nvme_wq, &ctrl->async_event_work);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
	ida_destroy(&ctrl->ns_ida);
	kfree(ctrl->effects);

	if (subsys) {
		mutex_lock(&subsys->lock);
		list_del(&ctrl->subsys_entry);
		mutex_unlock(&subsys->lock);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
/*
 * Initialize an NVMe controller structure. This needs to be called during the
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	ida_init(&ctrl->ns_ida);

	/*
	 * Initialize latency tolerance controls. The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_free_name:
	kfree_const(dev->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q)
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			continue;
		revalidate_disk(ns->disk);
		blk_set_queue_dying(ns->queue);

		/* Forcibly unquiesce queues to avoid blocking dispatch */
		blk_mq_unquiesce_queue(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
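/*
 * The freeze/quiesce helpers below walk every namespace queue under
 * namespaces_mutex. Transport drivers typically bracket a controller reset
 * with nvme_start_freeze()/nvme_wait_freeze() and call nvme_unfreeze() once
 * I/O can flow again; nvme_stop_queues()/nvme_start_queues() quiesce and
 * resume dispatch without freezing the queues.
 */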
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
{
	if (!ctrl->ops->reinit_request)
		return 0;

	return blk_mq_tagset_iter(set, set->driver_data,
			ctrl->ops->reinit_request);
}
EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
	int result;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		return -ENOMEM;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_wq:
	destroy_workqueue(nvme_wq);
	return result;
}
void nvme_core_exit(void)
{
	ida_destroy(&nvme_subsystems_ida);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_wq);
}
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);