// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive and periodic reconnects.  nvme_reset_wq runs reset works, which
 * also flush works hosted on nvme_wq for serialization purposes.
 * nvme_delete_wq hosts controller deletion works, which flush reset works
 * for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_set_queue_dying(ns->queue);
	blk_mq_unquiesce_queue(ns->queue);

	set_capacity_and_notify(ns->disk, 0);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}
static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;
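	/*
	 * A non-zero Command Retry Delay (CRD) in the completion status
	 * selects one of the controller's three CRDT values, which are
	 * specified in units of 100 milliseconds.
	 */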
	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}
static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));

	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
613 static int nvme_toggle_streams(struct nvme_ctrl
*ctrl
, bool enable
)
615 struct nvme_command c
;
617 memset(&c
, 0, sizeof(c
));
619 c
.directive
.opcode
= nvme_admin_directive_send
;
620 c
.directive
.nsid
= cpu_to_le32(NVME_NSID_ALL
);
621 c
.directive
.doper
= NVME_DIR_SND_ID_OP_ENABLE
;
622 c
.directive
.dtype
= NVME_DIR_IDENTIFY
;
623 c
.directive
.tdtype
= NVME_DIR_STREAMS
;
624 c
.directive
.endir
= enable
? NVME_DIR_ENDIR
: 0;
626 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, NULL
, 0);
629 static int nvme_disable_streams(struct nvme_ctrl
*ctrl
)
631 return nvme_toggle_streams(ctrl
, false);
634 static int nvme_enable_streams(struct nvme_ctrl
*ctrl
)
636 return nvme_toggle_streams(ctrl
, true);
639 static int nvme_get_stream_params(struct nvme_ctrl
*ctrl
,
640 struct streams_directive_params
*s
, u32 nsid
)
642 struct nvme_command c
;
644 memset(&c
, 0, sizeof(c
));
645 memset(s
, 0, sizeof(*s
));
647 c
.directive
.opcode
= nvme_admin_directive_recv
;
648 c
.directive
.nsid
= cpu_to_le32(nsid
);
649 c
.directive
.numd
= cpu_to_le32(nvme_bytes_to_numd(sizeof(*s
)));
650 c
.directive
.doper
= NVME_DIR_RCV_ST_OP_PARAM
;
651 c
.directive
.dtype
= NVME_DIR_STREAMS
;
653 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, s
, sizeof(*s
));
656 static int nvme_configure_directives(struct nvme_ctrl
*ctrl
)
658 struct streams_directive_params s
;
661 if (!(ctrl
->oacs
& NVME_CTRL_OACS_DIRECTIVES
))
666 ret
= nvme_enable_streams(ctrl
);
670 ret
= nvme_get_stream_params(ctrl
, &s
, NVME_NSID_ALL
);
672 goto out_disable_stream
;
674 ctrl
->nssa
= le16_to_cpu(s
.nssa
);
675 if (ctrl
->nssa
< BLK_MAX_WRITE_HINTS
- 1) {
676 dev_info(ctrl
->device
, "too few streams (%u) available\n",
678 goto out_disable_stream
;
681 ctrl
->nr_streams
= min_t(u16
, ctrl
->nssa
, BLK_MAX_WRITE_HINTS
- 1);
682 dev_info(ctrl
->device
, "Using %u streams\n", ctrl
->nr_streams
);
686 nvme_disable_streams(ctrl
);
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;
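		/*
		 * The stream identifier is carried in the directive specific
		 * (DSPEC) field, Dword 13 bits 31:16, with the directive type
		 * set to Streams.
		 */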
		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
715 static void nvme_setup_passthrough(struct request
*req
,
716 struct nvme_command
*cmd
)
718 memcpy(cmd
, nvme_req(req
)->cmd
, sizeof(*cmd
));
719 /* passthru commands should let the driver set the SGL flags */
720 cmd
->common
.flags
&= ~NVME_CMD_SGL_ALL
;
723 static inline void nvme_setup_flush(struct nvme_ns
*ns
,
724 struct nvme_command
*cmnd
)
726 cmnd
->common
.opcode
= nvme_cmd_flush
;
727 cmnd
->common
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
730 static blk_status_t
nvme_setup_discard(struct nvme_ns
*ns
, struct request
*req
,
731 struct nvme_command
*cmnd
)
733 unsigned short segments
= blk_rq_nr_discard_segments(req
), n
= 0;
734 struct nvme_dsm_range
*range
;
	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for the
	 * maximum number of segments to prevent the device from reading
	 * beyond the end of the buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}
757 __rq_for_each_bio(bio
, req
) {
758 u64 slba
= nvme_sect_to_lba(ns
, bio
->bi_iter
.bi_sector
);
759 u32 nlb
= bio
->bi_iter
.bi_size
>> ns
->lba_shift
;
762 range
[n
].cattr
= cpu_to_le32(0);
763 range
[n
].nlb
= cpu_to_le32(nlb
);
764 range
[n
].slba
= cpu_to_le64(slba
);
769 if (WARN_ON_ONCE(n
!= segments
)) {
770 if (virt_to_page(range
) == ns
->ctrl
->discard_page
)
771 clear_bit_unlock(0, &ns
->ctrl
->discard_page_busy
);
774 return BLK_STS_IOERR
;
777 cmnd
->dsm
.opcode
= nvme_cmd_dsm
;
778 cmnd
->dsm
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
779 cmnd
->dsm
.nr
= cpu_to_le32(segments
- 1);
780 cmnd
->dsm
.attributes
= cpu_to_le32(NVME_DSMGMT_AD
);
782 req
->special_vec
.bv_page
= virt_to_page(range
);
783 req
->special_vec
.bv_offset
= offset_in_page(range
);
784 req
->special_vec
.bv_len
= alloc_size
;
785 req
->rq_flags
|= RQF_SPECIAL_PAYLOAD
;
790 static inline blk_status_t
nvme_setup_write_zeroes(struct nvme_ns
*ns
,
791 struct request
*req
, struct nvme_command
*cmnd
)
793 if (ns
->ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
794 return nvme_setup_discard(ns
, req
, cmnd
);
796 cmnd
->write_zeroes
.opcode
= nvme_cmd_write_zeroes
;
797 cmnd
->write_zeroes
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
798 cmnd
->write_zeroes
.slba
=
799 cpu_to_le64(nvme_sect_to_lba(ns
, blk_rq_pos(req
)));
800 cmnd
->write_zeroes
.length
=
801 cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
802 cmnd
->write_zeroes
.control
= 0;
806 static inline blk_status_t
nvme_setup_rw(struct nvme_ns
*ns
,
807 struct request
*req
, struct nvme_command
*cmnd
,
810 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
814 if (req
->cmd_flags
& REQ_FUA
)
815 control
|= NVME_RW_FUA
;
816 if (req
->cmd_flags
& (REQ_FAILFAST_DEV
| REQ_RAHEAD
))
817 control
|= NVME_RW_LR
;
819 if (req
->cmd_flags
& REQ_RAHEAD
)
820 dsmgmt
|= NVME_RW_DSM_FREQ_PREFETCH
;
822 cmnd
->rw
.opcode
= op
;
823 cmnd
->rw
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
824 cmnd
->rw
.slba
= cpu_to_le64(nvme_sect_to_lba(ns
, blk_rq_pos(req
)));
825 cmnd
->rw
.length
= cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
827 if (req_op(req
) == REQ_OP_WRITE
&& ctrl
->nr_streams
)
828 nvme_assign_write_stream(ctrl
, req
, &control
, &dsmgmt
);
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}
843 switch (ns
->pi_type
) {
844 case NVME_NS_DPS_PI_TYPE3
:
845 control
|= NVME_RW_PRINFO_PRCHK_GUARD
;
847 case NVME_NS_DPS_PI_TYPE1
:
848 case NVME_NS_DPS_PI_TYPE2
:
849 control
|= NVME_RW_PRINFO_PRCHK_GUARD
|
850 NVME_RW_PRINFO_PRCHK_REF
;
851 if (op
== nvme_cmd_zone_append
)
852 control
|= NVME_RW_APPEND_PIREMAP
;
853 cmnd
->rw
.reftag
= cpu_to_le32(t10_pi_ref_tag(req
));
858 cmnd
->rw
.control
= cpu_to_le16(control
);
859 cmnd
->rw
.dsmgmt
= cpu_to_le32(dsmgmt
);
863 void nvme_cleanup_cmd(struct request
*req
)
865 if (req
->rq_flags
& RQF_SPECIAL_PAYLOAD
) {
866 struct nvme_ns
*ns
= req
->rq_disk
->private_data
;
867 struct page
*page
= req
->special_vec
.bv_page
;
869 if (page
== ns
->ctrl
->discard_page
)
870 clear_bit_unlock(0, &ns
->ctrl
->discard_page_busy
);
872 kfree(page_address(page
) + req
->special_vec
.bv_offset
);
875 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd
);
877 blk_status_t
nvme_setup_cmd(struct nvme_ns
*ns
, struct request
*req
,
878 struct nvme_command
*cmd
)
880 blk_status_t ret
= BLK_STS_OK
;
882 nvme_clear_nvme_request(req
);
884 memset(cmd
, 0, sizeof(*cmd
));
885 switch (req_op(req
)) {
888 nvme_setup_passthrough(req
, cmd
);
891 nvme_setup_flush(ns
, cmd
);
893 case REQ_OP_ZONE_RESET_ALL
:
894 case REQ_OP_ZONE_RESET
:
895 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_RESET
);
897 case REQ_OP_ZONE_OPEN
:
898 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_OPEN
);
900 case REQ_OP_ZONE_CLOSE
:
901 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_CLOSE
);
903 case REQ_OP_ZONE_FINISH
:
904 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_FINISH
);
906 case REQ_OP_WRITE_ZEROES
:
907 ret
= nvme_setup_write_zeroes(ns
, req
, cmd
);
910 ret
= nvme_setup_discard(ns
, req
, cmd
);
913 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_read
);
916 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_write
);
918 case REQ_OP_ZONE_APPEND
:
919 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_zone_append
);
923 return BLK_STS_IOERR
;
926 cmd
->common
.command_id
= req
->tag
;
927 trace_nvme_setup_cmd(req
, cmd
);
930 EXPORT_SYMBOL_GPL(nvme_setup_cmd
);
932 static void nvme_end_sync_rq(struct request
*rq
, blk_status_t error
)
934 struct completion
*waiting
= rq
->end_io_data
;
936 rq
->end_io_data
= NULL
;
940 static void nvme_execute_rq_polled(struct request_queue
*q
,
941 struct gendisk
*bd_disk
, struct request
*rq
, int at_head
)
943 DECLARE_COMPLETION_ONSTACK(wait
);
945 WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL
, &q
->queue_flags
));
947 rq
->cmd_flags
|= REQ_HIPRI
;
948 rq
->end_io_data
= &wait
;
949 blk_execute_rq_nowait(q
, bd_disk
, rq
, at_head
, nvme_end_sync_rq
);
951 while (!completion_done(&wait
)) {
952 blk_poll(q
, request_to_qc_t(rq
->mq_hctx
, rq
), true);
958 * Returns 0 on success. If the result is negative, it's a Linux error code;
959 * if the result is positive, it's an NVM Express status code
961 int __nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
962 union nvme_result
*result
, void *buffer
, unsigned bufflen
,
963 unsigned timeout
, int qid
, int at_head
,
964 blk_mq_req_flags_t flags
, bool poll
)
969 if (qid
== NVME_QID_ANY
)
970 req
= nvme_alloc_request(q
, cmd
, flags
);
972 req
= nvme_alloc_request_qid(q
, cmd
, flags
, qid
);
977 req
->timeout
= timeout
;
979 if (buffer
&& bufflen
) {
980 ret
= blk_rq_map_kern(q
, req
, buffer
, bufflen
, GFP_KERNEL
);
986 nvme_execute_rq_polled(req
->q
, NULL
, req
, at_head
);
988 blk_execute_rq(req
->q
, NULL
, req
, at_head
);
990 *result
= nvme_req(req
)->result
;
991 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
994 ret
= nvme_req(req
)->status
;
996 blk_mq_free_request(req
);
999 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd
);
1001 int nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
1002 void *buffer
, unsigned bufflen
)
1004 return __nvme_submit_sync_cmd(q
, cmd
, NULL
, buffer
, bufflen
, 0,
1005 NVME_QID_ANY
, 0, 0, false);
1007 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd
);
1009 static void *nvme_add_user_metadata(struct bio
*bio
, void __user
*ubuf
,
1010 unsigned len
, u32 seed
, bool write
)
1012 struct bio_integrity_payload
*bip
;
1016 buf
= kmalloc(len
, GFP_KERNEL
);
1021 if (write
&& copy_from_user(buf
, ubuf
, len
))
1024 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, 1);
1030 bip
->bip_iter
.bi_size
= len
;
1031 bip
->bip_iter
.bi_sector
= seed
;
1032 ret
= bio_integrity_add_page(bio
, virt_to_page(buf
), len
,
1033 offset_in_page(buf
));
1040 return ERR_PTR(ret
);
1043 static u32
nvme_known_admin_effects(u8 opcode
)
1046 case nvme_admin_format_nvm
:
1047 return NVME_CMD_EFFECTS_LBCC
| NVME_CMD_EFFECTS_NCC
|
1048 NVME_CMD_EFFECTS_CSE_MASK
;
1049 case nvme_admin_sanitize_nvm
:
1050 return NVME_CMD_EFFECTS_LBCC
| NVME_CMD_EFFECTS_CSE_MASK
;
1057 u32
nvme_command_effects(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
, u8 opcode
)
1062 if (ns
->head
->effects
)
1063 effects
= le32_to_cpu(ns
->head
->effects
->iocs
[opcode
]);
1064 if (effects
& ~(NVME_CMD_EFFECTS_CSUPP
| NVME_CMD_EFFECTS_LBCC
))
1065 dev_warn(ctrl
->device
,
1066 "IO command:%02x has unhandled effects:%08x\n",
1072 effects
= le32_to_cpu(ctrl
->effects
->acs
[opcode
]);
1073 effects
|= nvme_known_admin_effects(opcode
);
1077 EXPORT_SYMBOL_NS_GPL(nvme_command_effects
, NVME_TARGET_PASSTHRU
);
1079 static u32
nvme_passthru_start(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1082 u32 effects
= nvme_command_effects(ctrl
, ns
, opcode
);
1085 * For simplicity, IO to all namespaces is quiesced even if the command
1086 * effects say only one namespace is affected.
1088 if (effects
& NVME_CMD_EFFECTS_CSE_MASK
) {
1089 mutex_lock(&ctrl
->scan_lock
);
1090 mutex_lock(&ctrl
->subsys
->lock
);
1091 nvme_mpath_start_freeze(ctrl
->subsys
);
1092 nvme_mpath_wait_freeze(ctrl
->subsys
);
1093 nvme_start_freeze(ctrl
);
1094 nvme_wait_freeze(ctrl
);
1099 static void nvme_passthru_end(struct nvme_ctrl
*ctrl
, u32 effects
)
1101 if (effects
& NVME_CMD_EFFECTS_CSE_MASK
) {
1102 nvme_unfreeze(ctrl
);
1103 nvme_mpath_unfreeze(ctrl
->subsys
);
1104 mutex_unlock(&ctrl
->subsys
->lock
);
1105 nvme_remove_invalid_namespaces(ctrl
, NVME_NSID_ALL
);
1106 mutex_unlock(&ctrl
->scan_lock
);
1108 if (effects
& NVME_CMD_EFFECTS_CCC
)
1109 nvme_init_identify(ctrl
);
1110 if (effects
& (NVME_CMD_EFFECTS_NIC
| NVME_CMD_EFFECTS_NCC
)) {
1111 nvme_queue_scan(ctrl
);
1112 flush_work(&ctrl
->scan_work
);
1116 void nvme_execute_passthru_rq(struct request
*rq
)
1118 struct nvme_command
*cmd
= nvme_req(rq
)->cmd
;
1119 struct nvme_ctrl
*ctrl
= nvme_req(rq
)->ctrl
;
1120 struct nvme_ns
*ns
= rq
->q
->queuedata
;
1121 struct gendisk
*disk
= ns
? ns
->disk
: NULL
;
1124 effects
= nvme_passthru_start(ctrl
, ns
, cmd
->common
.opcode
);
1125 blk_execute_rq(rq
->q
, disk
, rq
, 0);
1126 nvme_passthru_end(ctrl
, effects
);
1128 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq
, NVME_TARGET_PASSTHRU
);
1130 static int nvme_submit_user_cmd(struct request_queue
*q
,
1131 struct nvme_command
*cmd
, void __user
*ubuffer
,
1132 unsigned bufflen
, void __user
*meta_buffer
, unsigned meta_len
,
1133 u32 meta_seed
, u64
*result
, unsigned timeout
)
1135 bool write
= nvme_is_write(cmd
);
1136 struct nvme_ns
*ns
= q
->queuedata
;
1137 struct gendisk
*disk
= ns
? ns
->disk
: NULL
;
1138 struct request
*req
;
1139 struct bio
*bio
= NULL
;
1143 req
= nvme_alloc_request(q
, cmd
, 0);
1145 return PTR_ERR(req
);
1148 req
->timeout
= timeout
;
1149 nvme_req(req
)->flags
|= NVME_REQ_USERCMD
;
1151 if (ubuffer
&& bufflen
) {
1152 ret
= blk_rq_map_user(q
, req
, NULL
, ubuffer
, bufflen
,
1157 bio
->bi_disk
= disk
;
1158 if (disk
&& meta_buffer
&& meta_len
) {
1159 meta
= nvme_add_user_metadata(bio
, meta_buffer
, meta_len
,
1162 ret
= PTR_ERR(meta
);
1165 req
->cmd_flags
|= REQ_INTEGRITY
;
1169 nvme_execute_passthru_rq(req
);
1170 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
1173 ret
= nvme_req(req
)->status
;
1175 *result
= le64_to_cpu(nvme_req(req
)->result
.u64
);
1176 if (meta
&& !ret
&& !write
) {
1177 if (copy_to_user(meta_buffer
, meta
, meta_len
))
1183 blk_rq_unmap_user(bio
);
1185 blk_mq_free_request(req
);
1189 static void nvme_keep_alive_end_io(struct request
*rq
, blk_status_t status
)
1191 struct nvme_ctrl
*ctrl
= rq
->end_io_data
;
1192 unsigned long flags
;
1193 bool startka
= false;
1195 blk_mq_free_request(rq
);
1198 dev_err(ctrl
->device
,
1199 "failed nvme_keep_alive_end_io error=%d\n",
1204 ctrl
->comp_seen
= false;
1205 spin_lock_irqsave(&ctrl
->lock
, flags
);
1206 if (ctrl
->state
== NVME_CTRL_LIVE
||
1207 ctrl
->state
== NVME_CTRL_CONNECTING
)
1209 spin_unlock_irqrestore(&ctrl
->lock
, flags
);
1211 queue_delayed_work(nvme_wq
, &ctrl
->ka_work
, ctrl
->kato
* HZ
);
1214 static int nvme_keep_alive(struct nvme_ctrl
*ctrl
)
1218 rq
= nvme_alloc_request(ctrl
->admin_q
, &ctrl
->ka_cmd
,
1219 BLK_MQ_REQ_RESERVED
);
1223 rq
->timeout
= ctrl
->kato
* HZ
;
1224 rq
->end_io_data
= ctrl
;
1226 blk_execute_rq_nowait(rq
->q
, NULL
, rq
, 0, nvme_keep_alive_end_io
);
1231 static void nvme_keep_alive_work(struct work_struct
*work
)
1233 struct nvme_ctrl
*ctrl
= container_of(to_delayed_work(work
),
1234 struct nvme_ctrl
, ka_work
);
1235 bool comp_seen
= ctrl
->comp_seen
;
1237 if ((ctrl
->ctratt
& NVME_CTRL_ATTR_TBKAS
) && comp_seen
) {
1238 dev_dbg(ctrl
->device
,
1239 "reschedule traffic based keep-alive timer\n");
1240 ctrl
->comp_seen
= false;
1241 queue_delayed_work(nvme_wq
, &ctrl
->ka_work
, ctrl
->kato
* HZ
);
1245 if (nvme_keep_alive(ctrl
)) {
1246 /* allocation failure, reset the controller */
1247 dev_err(ctrl
->device
, "keep-alive failed\n");
1248 nvme_reset_ctrl(ctrl
);
1253 static void nvme_start_keep_alive(struct nvme_ctrl
*ctrl
)
1255 if (unlikely(ctrl
->kato
== 0))
1258 queue_delayed_work(nvme_wq
, &ctrl
->ka_work
, ctrl
->kato
* HZ
);
1261 void nvme_stop_keep_alive(struct nvme_ctrl
*ctrl
)
1263 if (unlikely(ctrl
->kato
== 0))
1266 cancel_delayed_work_sync(&ctrl
->ka_work
);
1268 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive
);
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}
1283 static int nvme_identify_ctrl(struct nvme_ctrl
*dev
, struct nvme_id_ctrl
**id
)
1285 struct nvme_command c
= { };
1288 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1289 c
.identify
.opcode
= nvme_admin_identify
;
1290 c
.identify
.cns
= NVME_ID_CNS_CTRL
;
1292 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
1296 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
1297 sizeof(struct nvme_id_ctrl
));
1303 static bool nvme_multi_css(struct nvme_ctrl
*ctrl
)
1305 return (ctrl
->ctrl_config
& NVME_CC_CSS_MASK
) == NVME_CC_CSS_CSI
;
1308 static int nvme_process_ns_desc(struct nvme_ctrl
*ctrl
, struct nvme_ns_ids
*ids
,
1309 struct nvme_ns_id_desc
*cur
, bool *csi_seen
)
1311 const char *warn_str
= "ctrl returned bogus length:";
1314 switch (cur
->nidt
) {
1315 case NVME_NIDT_EUI64
:
1316 if (cur
->nidl
!= NVME_NIDT_EUI64_LEN
) {
1317 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_EUI64\n",
1318 warn_str
, cur
->nidl
);
1321 memcpy(ids
->eui64
, data
+ sizeof(*cur
), NVME_NIDT_EUI64_LEN
);
1322 return NVME_NIDT_EUI64_LEN
;
1323 case NVME_NIDT_NGUID
:
1324 if (cur
->nidl
!= NVME_NIDT_NGUID_LEN
) {
1325 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_NGUID\n",
1326 warn_str
, cur
->nidl
);
1329 memcpy(ids
->nguid
, data
+ sizeof(*cur
), NVME_NIDT_NGUID_LEN
);
1330 return NVME_NIDT_NGUID_LEN
;
1331 case NVME_NIDT_UUID
:
1332 if (cur
->nidl
!= NVME_NIDT_UUID_LEN
) {
1333 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_UUID\n",
1334 warn_str
, cur
->nidl
);
1337 uuid_copy(&ids
->uuid
, data
+ sizeof(*cur
));
1338 return NVME_NIDT_UUID_LEN
;
1340 if (cur
->nidl
!= NVME_NIDT_CSI_LEN
) {
1341 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_CSI\n",
1342 warn_str
, cur
->nidl
);
1345 memcpy(&ids
->csi
, data
+ sizeof(*cur
), NVME_NIDT_CSI_LEN
);
1347 return NVME_NIDT_CSI_LEN
;
1349 /* Skip unknown types */
1354 static int nvme_identify_ns_descs(struct nvme_ctrl
*ctrl
, unsigned nsid
,
1355 struct nvme_ns_ids
*ids
)
1357 struct nvme_command c
= { };
1358 bool csi_seen
= false;
1359 int status
, pos
, len
;
1362 if (ctrl
->vs
< NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl
))
1364 if (ctrl
->quirks
& NVME_QUIRK_NO_NS_DESC_LIST
)
1367 c
.identify
.opcode
= nvme_admin_identify
;
1368 c
.identify
.nsid
= cpu_to_le32(nsid
);
1369 c
.identify
.cns
= NVME_ID_CNS_NS_DESC_LIST
;
1371 data
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
1375 status
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, data
,
1376 NVME_IDENTIFY_DATA_SIZE
);
1378 dev_warn(ctrl
->device
,
1379 "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1384 for (pos
= 0; pos
< NVME_IDENTIFY_DATA_SIZE
; pos
+= len
) {
1385 struct nvme_ns_id_desc
*cur
= data
+ pos
;
1390 len
= nvme_process_ns_desc(ctrl
, ids
, cur
, &csi_seen
);
1394 len
+= sizeof(*cur
);
1397 if (nvme_multi_css(ctrl
) && !csi_seen
) {
1398 dev_warn(ctrl
->device
, "Command set not reported for nsid:%d\n",
1408 static int nvme_identify_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
,
1409 struct nvme_ns_ids
*ids
, struct nvme_id_ns
**id
)
1411 struct nvme_command c
= { };
1414 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1415 c
.identify
.opcode
= nvme_admin_identify
;
1416 c
.identify
.nsid
= cpu_to_le32(nsid
);
1417 c
.identify
.cns
= NVME_ID_CNS_NS
;
1419 *id
= kmalloc(sizeof(**id
), GFP_KERNEL
);
1423 error
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, *id
, sizeof(**id
));
1425 dev_warn(ctrl
->device
, "Identify namespace failed (%d)\n", error
);
1429 error
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
1430 if ((*id
)->ncap
== 0) /* namespace not allocated or attached */
1433 if (ctrl
->vs
>= NVME_VS(1, 1, 0) &&
1434 !memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
1435 memcpy(ids
->eui64
, (*id
)->eui64
, sizeof(ids
->eui64
));
1436 if (ctrl
->vs
>= NVME_VS(1, 2, 0) &&
1437 !memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
1438 memcpy(ids
->nguid
, (*id
)->nguid
, sizeof(ids
->nguid
));
1447 static int nvme_features(struct nvme_ctrl
*dev
, u8 op
, unsigned int fid
,
1448 unsigned int dword11
, void *buffer
, size_t buflen
, u32
*result
)
1450 union nvme_result res
= { 0 };
1451 struct nvme_command c
;
1454 memset(&c
, 0, sizeof(c
));
1455 c
.features
.opcode
= op
;
1456 c
.features
.fid
= cpu_to_le32(fid
);
1457 c
.features
.dword11
= cpu_to_le32(dword11
);
1459 ret
= __nvme_submit_sync_cmd(dev
->admin_q
, &c
, &res
,
1460 buffer
, buflen
, 0, NVME_QID_ANY
, 0, 0, false);
1461 if (ret
>= 0 && result
)
1462 *result
= le32_to_cpu(res
.u32
);
1466 int nvme_set_features(struct nvme_ctrl
*dev
, unsigned int fid
,
1467 unsigned int dword11
, void *buffer
, size_t buflen
,
1470 return nvme_features(dev
, nvme_admin_set_features
, fid
, dword11
, buffer
,
1473 EXPORT_SYMBOL_GPL(nvme_set_features
);
1475 int nvme_get_features(struct nvme_ctrl
*dev
, unsigned int fid
,
1476 unsigned int dword11
, void *buffer
, size_t buflen
,
1479 return nvme_features(dev
, nvme_admin_get_features
, fid
, dword11
, buffer
,
1482 EXPORT_SYMBOL_GPL(nvme_get_features
);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
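	/*
	 * The Number of Queues feature takes 0's based queue counts: the
	 * requested number of I/O submission queues goes in CDW11 bits 15:0
	 * and the number of I/O completion queues in bits 31:16.
	 */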
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1512 #define NVME_AEN_SUPPORTED \
1513 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1514 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1516 static void nvme_enable_aen(struct nvme_ctrl
*ctrl
)
1518 u32 result
, supported_aens
= ctrl
->oaes
& NVME_AEN_SUPPORTED
;
1521 if (!supported_aens
)
1524 status
= nvme_set_features(ctrl
, NVME_FEAT_ASYNC_EVENT
, supported_aens
,
1527 dev_warn(ctrl
->device
, "Failed to configure AEN (cfg %x)\n",
1530 queue_work(nvme_wq
, &ctrl
->async_event_work
);
/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}
1545 static int nvme_submit_io(struct nvme_ns
*ns
, struct nvme_user_io __user
*uio
)
1547 struct nvme_user_io io
;
1548 struct nvme_command c
;
1549 unsigned length
, meta_len
;
1550 void __user
*metadata
;
1552 if (copy_from_user(&io
, uio
, sizeof(io
)))
1557 switch (io
.opcode
) {
1558 case nvme_cmd_write
:
1560 case nvme_cmd_compare
:
1566 length
= (io
.nblocks
+ 1) << ns
->lba_shift
;
1568 if ((io
.control
& NVME_RW_PRINFO_PRACT
) &&
1569 ns
->ms
== sizeof(struct t10_pi_tuple
)) {
1571 * Protection information is stripped/inserted by the
1574 if (nvme_to_user_ptr(io
.metadata
))
1579 meta_len
= (io
.nblocks
+ 1) * ns
->ms
;
1580 metadata
= nvme_to_user_ptr(io
.metadata
);
1583 if (ns
->features
& NVME_NS_EXT_LBAS
) {
1586 } else if (meta_len
) {
1587 if ((io
.metadata
& 3) || !io
.metadata
)
1591 memset(&c
, 0, sizeof(c
));
1592 c
.rw
.opcode
= io
.opcode
;
1593 c
.rw
.flags
= io
.flags
;
1594 c
.rw
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
1595 c
.rw
.slba
= cpu_to_le64(io
.slba
);
1596 c
.rw
.length
= cpu_to_le16(io
.nblocks
);
1597 c
.rw
.control
= cpu_to_le16(io
.control
);
1598 c
.rw
.dsmgmt
= cpu_to_le32(io
.dsmgmt
);
1599 c
.rw
.reftag
= cpu_to_le32(io
.reftag
);
1600 c
.rw
.apptag
= cpu_to_le16(io
.apptag
);
1601 c
.rw
.appmask
= cpu_to_le16(io
.appmask
);
1603 return nvme_submit_user_cmd(ns
->queue
, &c
,
1604 nvme_to_user_ptr(io
.addr
), length
,
1605 metadata
, meta_len
, lower_32_bits(io
.slba
), NULL
, 0);
1608 static int nvme_user_cmd(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1609 struct nvme_passthru_cmd __user
*ucmd
)
1611 struct nvme_passthru_cmd cmd
;
1612 struct nvme_command c
;
1613 unsigned timeout
= 0;
1617 if (!capable(CAP_SYS_ADMIN
))
1619 if (copy_from_user(&cmd
, ucmd
, sizeof(cmd
)))
1624 memset(&c
, 0, sizeof(c
));
1625 c
.common
.opcode
= cmd
.opcode
;
1626 c
.common
.flags
= cmd
.flags
;
1627 c
.common
.nsid
= cpu_to_le32(cmd
.nsid
);
1628 c
.common
.cdw2
[0] = cpu_to_le32(cmd
.cdw2
);
1629 c
.common
.cdw2
[1] = cpu_to_le32(cmd
.cdw3
);
1630 c
.common
.cdw10
= cpu_to_le32(cmd
.cdw10
);
1631 c
.common
.cdw11
= cpu_to_le32(cmd
.cdw11
);
1632 c
.common
.cdw12
= cpu_to_le32(cmd
.cdw12
);
1633 c
.common
.cdw13
= cpu_to_le32(cmd
.cdw13
);
1634 c
.common
.cdw14
= cpu_to_le32(cmd
.cdw14
);
1635 c
.common
.cdw15
= cpu_to_le32(cmd
.cdw15
);
1638 timeout
= msecs_to_jiffies(cmd
.timeout_ms
);
1640 status
= nvme_submit_user_cmd(ns
? ns
->queue
: ctrl
->admin_q
, &c
,
1641 nvme_to_user_ptr(cmd
.addr
), cmd
.data_len
,
1642 nvme_to_user_ptr(cmd
.metadata
), cmd
.metadata_len
,
1643 0, &result
, timeout
);
1646 if (put_user(result
, &ucmd
->result
))
1653 static int nvme_user_cmd64(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1654 struct nvme_passthru_cmd64 __user
*ucmd
)
1656 struct nvme_passthru_cmd64 cmd
;
1657 struct nvme_command c
;
1658 unsigned timeout
= 0;
1661 if (!capable(CAP_SYS_ADMIN
))
1663 if (copy_from_user(&cmd
, ucmd
, sizeof(cmd
)))
1668 memset(&c
, 0, sizeof(c
));
1669 c
.common
.opcode
= cmd
.opcode
;
1670 c
.common
.flags
= cmd
.flags
;
1671 c
.common
.nsid
= cpu_to_le32(cmd
.nsid
);
1672 c
.common
.cdw2
[0] = cpu_to_le32(cmd
.cdw2
);
1673 c
.common
.cdw2
[1] = cpu_to_le32(cmd
.cdw3
);
1674 c
.common
.cdw10
= cpu_to_le32(cmd
.cdw10
);
1675 c
.common
.cdw11
= cpu_to_le32(cmd
.cdw11
);
1676 c
.common
.cdw12
= cpu_to_le32(cmd
.cdw12
);
1677 c
.common
.cdw13
= cpu_to_le32(cmd
.cdw13
);
1678 c
.common
.cdw14
= cpu_to_le32(cmd
.cdw14
);
1679 c
.common
.cdw15
= cpu_to_le32(cmd
.cdw15
);
1682 timeout
= msecs_to_jiffies(cmd
.timeout_ms
);
1684 status
= nvme_submit_user_cmd(ns
? ns
->queue
: ctrl
->admin_q
, &c
,
1685 nvme_to_user_ptr(cmd
.addr
), cmd
.data_len
,
1686 nvme_to_user_ptr(cmd
.metadata
), cmd
.metadata_len
,
1687 0, &cmd
.result
, timeout
);
1690 if (put_user(cmd
.result
, &ucmd
->result
))
1698 * Issue ioctl requests on the first available path. Note that unlike normal
1699 * block layer requests we will not retry failed request on another controller.
1701 struct nvme_ns
*nvme_get_ns_from_disk(struct gendisk
*disk
,
1702 struct nvme_ns_head
**head
, int *srcu_idx
)
1704 #ifdef CONFIG_NVME_MULTIPATH
1705 if (disk
->fops
== &nvme_ns_head_ops
) {
1708 *head
= disk
->private_data
;
1709 *srcu_idx
= srcu_read_lock(&(*head
)->srcu
);
1710 ns
= nvme_find_path(*head
);
1712 srcu_read_unlock(&(*head
)->srcu
, *srcu_idx
);
1718 return disk
->private_data
;
1721 void nvme_put_ns_from_disk(struct nvme_ns_head
*head
, int idx
)
1724 srcu_read_unlock(&head
->srcu
, idx
);
1727 static bool is_ctrl_ioctl(unsigned int cmd
)
1729 if (cmd
== NVME_IOCTL_ADMIN_CMD
|| cmd
== NVME_IOCTL_ADMIN64_CMD
)
1731 if (is_sed_ioctl(cmd
))
1736 static int nvme_handle_ctrl_ioctl(struct nvme_ns
*ns
, unsigned int cmd
,
1738 struct nvme_ns_head
*head
,
1741 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1744 nvme_get_ctrl(ns
->ctrl
);
1745 nvme_put_ns_from_disk(head
, srcu_idx
);
1748 case NVME_IOCTL_ADMIN_CMD
:
1749 ret
= nvme_user_cmd(ctrl
, NULL
, argp
);
1751 case NVME_IOCTL_ADMIN64_CMD
:
1752 ret
= nvme_user_cmd64(ctrl
, NULL
, argp
);
1755 ret
= sed_ioctl(ctrl
->opal_dev
, cmd
, argp
);
1758 nvme_put_ctrl(ctrl
);
1762 static int nvme_ioctl(struct block_device
*bdev
, fmode_t mode
,
1763 unsigned int cmd
, unsigned long arg
)
1765 struct nvme_ns_head
*head
= NULL
;
1766 void __user
*argp
= (void __user
*)arg
;
1770 ns
= nvme_get_ns_from_disk(bdev
->bd_disk
, &head
, &srcu_idx
);
1772 return -EWOULDBLOCK
;
	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1784 force_successful_syscall_return();
1785 ret
= ns
->head
->ns_id
;
1787 case NVME_IOCTL_IO_CMD
:
1788 ret
= nvme_user_cmd(ns
->ctrl
, ns
, argp
);
1790 case NVME_IOCTL_SUBMIT_IO
:
1791 ret
= nvme_submit_io(ns
, argp
);
1793 case NVME_IOCTL_IO64_CMD
:
1794 ret
= nvme_user_cmd64(ns
->ctrl
, ns
, argp
);
1798 ret
= nvme_nvm_ioctl(ns
, cmd
, arg
);
1803 nvme_put_ns_from_disk(head
, srcu_idx
);
#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));

#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	/*
	 * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
	 * between 32 bit programs and 64 bit kernel.
	 * The cause is that the results of sizeof(struct nvme_user_io),
	 * which is used to define NVME_IOCTL_SUBMIT_IO,
	 * are not the same between 32 bit and 64 bit compilers.
	 * NVME_IOCTL_SUBMIT_IO32 is for a 64 bit kernel handling
	 * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
	 * Other IOCTL numbers are the same between 32 bit and 64 bit,
	 * so there is nothing to do for them.
	 */
	if (cmd == NVME_IOCTL_SUBMIT_IO32)
		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);

	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif /* CONFIG_COMPAT */
1848 static int nvme_open(struct block_device
*bdev
, fmode_t mode
)
1850 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1852 #ifdef CONFIG_NVME_MULTIPATH
1853 /* should never be called due to GENHD_FL_HIDDEN */
1854 if (WARN_ON_ONCE(ns
->head
->disk
))
1857 if (!kref_get_unless_zero(&ns
->kref
))
1859 if (!try_module_get(ns
->ctrl
->ops
->module
))
1870 static void nvme_release(struct gendisk
*disk
, fmode_t mode
)
1872 struct nvme_ns
*ns
= disk
->private_data
;
1874 module_put(ns
->ctrl
->ops
->module
);
1878 static int nvme_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
1880 /* some standard values */
1881 geo
->heads
= 1 << 6;
1882 geo
->sectors
= 1 << 5;
1883 geo
->cylinders
= get_capacity(bdev
->bd_disk
) >> 11;
1887 #ifdef CONFIG_BLK_DEV_INTEGRITY
1888 static void nvme_init_integrity(struct gendisk
*disk
, u16 ms
, u8 pi_type
,
1889 u32 max_integrity_segments
)
1891 struct blk_integrity integrity
;
1893 memset(&integrity
, 0, sizeof(integrity
));
1895 case NVME_NS_DPS_PI_TYPE3
:
1896 integrity
.profile
= &t10_pi_type3_crc
;
1897 integrity
.tag_size
= sizeof(u16
) + sizeof(u32
);
1898 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1900 case NVME_NS_DPS_PI_TYPE1
:
1901 case NVME_NS_DPS_PI_TYPE2
:
1902 integrity
.profile
= &t10_pi_type1_crc
;
1903 integrity
.tag_size
= sizeof(u16
);
1904 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1907 integrity
.profile
= NULL
;
1910 integrity
.tuple_size
= ms
;
1911 blk_integrity_register(disk
, &integrity
);
1912 blk_queue_max_integrity_segments(disk
->queue
, max_integrity_segments
);
1915 static void nvme_init_integrity(struct gendisk
*disk
, u16 ms
, u8 pi_type
,
1916 u32 max_integrity_segments
)
1919 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1921 static void nvme_config_discard(struct gendisk
*disk
, struct nvme_ns
*ns
)
1923 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1924 struct request_queue
*queue
= disk
->queue
;
1925 u32 size
= queue_logical_block_size(queue
);
1927 if (!(ctrl
->oncs
& NVME_CTRL_ONCS_DSM
)) {
1928 blk_queue_flag_clear(QUEUE_FLAG_DISCARD
, queue
);
1932 if (ctrl
->nr_streams
&& ns
->sws
&& ns
->sgs
)
1933 size
*= ns
->sws
* ns
->sgs
;
1935 BUILD_BUG_ON(PAGE_SIZE
/ sizeof(struct nvme_dsm_range
) <
1936 NVME_DSM_MAX_RANGES
);
1938 queue
->limits
.discard_alignment
= 0;
1939 queue
->limits
.discard_granularity
= size
;
1941 /* If discard is already enabled, don't reset queue limits */
1942 if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD
, queue
))
1945 blk_queue_max_discard_sectors(queue
, UINT_MAX
);
1946 blk_queue_max_discard_segments(queue
, NVME_DSM_MAX_RANGES
);
1948 if (ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
1949 blk_queue_max_write_zeroes_sectors(queue
, UINT_MAX
);
/*
 * Even though the NVMe spec explicitly states that MDTS is not applicable to
 * the write-zeroes command, we are cautious and limit the size to the
 * controller's max_hw_sectors value, which is based on the MDTS field and
 * possibly other limiting factors.
 */
static void nvme_config_write_zeroes(struct request_queue *q,
		struct nvme_ctrl *ctrl)
{
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
}
1966 static bool nvme_ns_ids_valid(struct nvme_ns_ids
*ids
)
1968 return !uuid_is_null(&ids
->uuid
) ||
1969 memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)) ||
1970 memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
));
1973 static bool nvme_ns_ids_equal(struct nvme_ns_ids
*a
, struct nvme_ns_ids
*b
)
1975 return uuid_equal(&a
->uuid
, &b
->uuid
) &&
1976 memcmp(&a
->nguid
, &b
->nguid
, sizeof(a
->nguid
)) == 0 &&
1977 memcmp(&a
->eui64
, &b
->eui64
, sizeof(a
->eui64
)) == 0 &&
1981 static int nvme_setup_streams_ns(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
1982 u32
*phys_bs
, u32
*io_opt
)
1984 struct streams_directive_params s
;
1987 if (!ctrl
->nr_streams
)
1990 ret
= nvme_get_stream_params(ctrl
, &s
, ns
->head
->ns_id
);
1994 ns
->sws
= le32_to_cpu(s
.sws
);
1995 ns
->sgs
= le16_to_cpu(s
.sgs
);
1998 *phys_bs
= ns
->sws
* (1 << ns
->lba_shift
);
2000 *io_opt
= *phys_bs
* ns
->sgs
;
2006 static int nvme_configure_metadata(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
2008 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
2011 * The PI implementation requires the metadata size to be equal to the
2012 * t10 pi tuple size.
2014 ns
->ms
= le16_to_cpu(id
->lbaf
[id
->flbas
& NVME_NS_FLBAS_LBA_MASK
].ms
);
2015 if (ns
->ms
== sizeof(struct t10_pi_tuple
))
2016 ns
->pi_type
= id
->dps
& NVME_NS_DPS_PI_MASK
;
2020 ns
->features
&= ~(NVME_NS_METADATA_SUPPORTED
| NVME_NS_EXT_LBAS
);
2021 if (!ns
->ms
|| !(ctrl
->ops
->flags
& NVME_F_METADATA_SUPPORTED
))
2023 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
2025 * The NVMe over Fabrics specification only supports metadata as
2026 * part of the extended data LBA. We rely on HCA/HBA support to
2027 * remap the separate metadata buffer from the block layer.
2029 if (WARN_ON_ONCE(!(id
->flbas
& NVME_NS_FLBAS_META_EXT
)))
2031 if (ctrl
->max_integrity_segments
)
2033 (NVME_NS_METADATA_SUPPORTED
| NVME_NS_EXT_LBAS
);
2036 * For PCIe controllers, we can't easily remap the separate
2037 * metadata buffer from the block layer and thus require a
2038 * separate metadata buffer for block layer metadata/PI support.
2039 * We allow extended LBAs for the passthrough interface, though.
2041 if (id
->flbas
& NVME_NS_FLBAS_META_EXT
)
2042 ns
->features
|= NVME_NS_EXT_LBAS
;
2044 ns
->features
|= NVME_NS_METADATA_SUPPORTED
;
2050 static void nvme_set_queue_limits(struct nvme_ctrl
*ctrl
,
2051 struct request_queue
*q
)
2053 bool vwc
= ctrl
->vwc
& NVME_CTRL_VWC_PRESENT
;
2055 if (ctrl
->max_hw_sectors
) {
2057 (ctrl
->max_hw_sectors
/ (NVME_CTRL_PAGE_SIZE
>> 9)) + 1;
2059 max_segments
= min_not_zero(max_segments
, ctrl
->max_segments
);
2060 blk_queue_max_hw_sectors(q
, ctrl
->max_hw_sectors
);
2061 blk_queue_max_segments(q
, min_t(u32
, max_segments
, USHRT_MAX
));
2063 blk_queue_virt_boundary(q
, NVME_CTRL_PAGE_SIZE
- 1);
2064 blk_queue_dma_alignment(q
, 7);
2065 blk_queue_write_cache(q
, vwc
, vwc
);
2068 static void nvme_update_disk_info(struct gendisk
*disk
,
2069 struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
2071 sector_t capacity
= nvme_lba_to_sect(ns
, le64_to_cpu(id
->nsze
));
2072 unsigned short bs
= 1 << ns
->lba_shift
;
2073 u32 atomic_bs
, phys_bs
, io_opt
= 0;
2076 * The block layer can't support LBA sizes larger than the page size
2077 * yet, so catch this early and don't allow block I/O.
2079 if (ns
->lba_shift
> PAGE_SHIFT
) {
2084 blk_integrity_unregister(disk
);
2086 atomic_bs
= phys_bs
= bs
;
2087 nvme_setup_streams_ns(ns
->ctrl
, ns
, &phys_bs
, &io_opt
);
2088 if (id
->nabo
== 0) {
2090 * Bit 1 indicates whether NAWUPF is defined for this namespace
2091 * and whether it should be used instead of AWUPF. If NAWUPF ==
2092 * 0 then AWUPF must be used instead.
2094 if (id
->nsfeat
& NVME_NS_FEAT_ATOMICS
&& id
->nawupf
)
2095 atomic_bs
= (1 + le16_to_cpu(id
->nawupf
)) * bs
;
2097 atomic_bs
= (1 + ns
->ctrl
->subsys
->awupf
) * bs
;
2100 if (id
->nsfeat
& NVME_NS_FEAT_IO_OPT
) {
2101 /* NPWG = Namespace Preferred Write Granularity */
2102 phys_bs
= bs
* (1 + le16_to_cpu(id
->npwg
));
2103 /* NOWS = Namespace Optimal Write Size */
2104 io_opt
= bs
* (1 + le16_to_cpu(id
->nows
));
2107 blk_queue_logical_block_size(disk
->queue
, bs
);
2109 * Linux filesystems assume writing a single physical block is
2110 * an atomic operation. Hence limit the physical block size to the
2111 * value of the Atomic Write Unit Power Fail parameter.
2113 blk_queue_physical_block_size(disk
->queue
, min(phys_bs
, atomic_bs
));
2114 blk_queue_io_min(disk
->queue
, phys_bs
);
2115 blk_queue_io_opt(disk
->queue
, io_opt
);
2118 * Register a metadata profile for PI, or the plain non-integrity NVMe
2119 * metadata masquerading as Type 0 if supported, otherwise reject block
2120 * I/O to namespaces with metadata except when the namespace supports
2121 * PI, as it can strip/insert in that case.
2124 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY
) &&
2125 (ns
->features
& NVME_NS_METADATA_SUPPORTED
))
2126 nvme_init_integrity(disk
, ns
->ms
, ns
->pi_type
,
2127 ns
->ctrl
->max_integrity_segments
);
2128 else if (!nvme_ns_has_pi(ns
))
2132 set_capacity_and_notify(disk
, capacity
);
2134 nvme_config_discard(disk
, ns
);
2135 nvme_config_write_zeroes(disk
->queue
, ns
->ctrl
);
2137 if ((id
->nsattr
& NVME_NS_ATTR_RO
) ||
2138 test_bit(NVME_NS_FORCE_RO
, &ns
->flags
))
2139 set_disk_ro(disk
, true);
2142 static inline bool nvme_first_scan(struct gendisk
*disk
)
2144 /* nvme_alloc_ns() scans the disk prior to adding it */
2145 return !(disk
->flags
& GENHD_FL_UP
);
2148 static void nvme_set_chunk_sectors(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
2150 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
2153 if ((ctrl
->quirks
& NVME_QUIRK_STRIPE_SIZE
) &&
2154 is_power_of_2(ctrl
->max_hw_sectors
))
2155 iob
= ctrl
->max_hw_sectors
;
2157 iob
= nvme_lba_to_sect(ns
, le16_to_cpu(id
->noiob
));
2162 if (!is_power_of_2(iob
)) {
2163 if (nvme_first_scan(ns
->disk
))
2164 pr_warn("%s: ignoring unaligned IO boundary:%u\n",
2165 ns
->disk
->disk_name
, iob
);
2169 if (blk_queue_is_zoned(ns
->disk
->queue
)) {
2170 if (nvme_first_scan(ns
->disk
))
2171 pr_warn("%s: ignoring zoned namespace IO boundary\n",
2172 ns
->disk
->disk_name
);
2176 blk_queue_chunk_sectors(ns
->queue
, iob
);
2179 static int nvme_update_ns_info(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
2181 unsigned lbaf
= id
->flbas
& NVME_NS_FLBAS_LBA_MASK
;
2184 blk_mq_freeze_queue(ns
->disk
->queue
);
2185 ns
->lba_shift
= id
->lbaf
[lbaf
].ds
;
2186 nvme_set_queue_limits(ns
->ctrl
, ns
->queue
);
2188 if (ns
->head
->ids
.csi
== NVME_CSI_ZNS
) {
2189 ret
= nvme_update_zone_info(ns
, lbaf
);
2194 ret
= nvme_configure_metadata(ns
, id
);
2197 nvme_set_chunk_sectors(ns
, id
);
2198 nvme_update_disk_info(ns
->disk
, ns
, id
);
2199 blk_mq_unfreeze_queue(ns
->disk
->queue
);
2201 if (blk_queue_is_zoned(ns
->queue
)) {
2202 ret
= nvme_revalidate_zones(ns
);
2203 if (ret
&& !nvme_first_scan(ns
->disk
))
2207 #ifdef CONFIG_NVME_MULTIPATH
2208 if (ns
->head
->disk
) {
2209 blk_mq_freeze_queue(ns
->head
->disk
->queue
);
2210 nvme_update_disk_info(ns
->head
->disk
, ns
, id
);
2211 blk_stack_limits(&ns
->head
->disk
->queue
->limits
,
2212 &ns
->queue
->limits
, 0);
2213 blk_queue_update_readahead(ns
->head
->disk
->queue
);
2214 blk_mq_unfreeze_queue(ns
->head
->disk
->queue
);
2220 blk_mq_unfreeze_queue(ns
->disk
->queue
);
2224 static char nvme_pr_type(enum pr_type type
)
2227 case PR_WRITE_EXCLUSIVE
:
2229 case PR_EXCLUSIVE_ACCESS
:
2231 case PR_WRITE_EXCLUSIVE_REG_ONLY
:
2233 case PR_EXCLUSIVE_ACCESS_REG_ONLY
:
2235 case PR_WRITE_EXCLUSIVE_ALL_REGS
:
2237 case PR_EXCLUSIVE_ACCESS_ALL_REGS
:
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	struct nvme_command c;
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;
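	/*
	 * 16-byte payload: current reservation key in bytes 0-7, new /
	 * service-action (e.g. preempt) key in bytes 8-15, little endian.
	 */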
	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
        u64 new, unsigned flags)
{
    u32 cdw10;

    if (flags & ~PR_FL_IGNORE_KEY)
        return -EOPNOTSUPP;
    cdw10 = old ? 2 : 0;
    cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
    cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
    return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
        enum pr_type type, unsigned flags)
{
    u32 cdw10;

    if (flags & ~PR_FL_IGNORE_KEY)
        return -EOPNOTSUPP;
    cdw10 = nvme_pr_type(type) << 8;
    cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
    return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
        enum pr_type type, bool abort)
{
    u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
    return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
    u32 cdw10 = 1 | (key ? 1 << 3 : 0);
    return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
    u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
    return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
    .pr_register    = nvme_pr_register,
    .pr_reserve     = nvme_pr_reserve,
    .pr_release     = nvme_pr_release,
    .pr_preempt     = nvme_pr_preempt,
    .pr_clear       = nvme_pr_clear,
};
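/*
 * Note on the cdw10 values built by the persistent-reservation helpers
 * above, as we read the NVMe base specification: bits 2:0 carry the action
 * code (e.g. register/replace for Reservation Register, acquire/preempt/
 * preempt-and-abort for Reservation Acquire), bit 3 is IEKEY (ignore
 * existing key), bits 15:8 carry the reservation type from nvme_pr_type(),
 * and the (1 << 30) | (1 << 31) set in nvme_pr_register() is the CPTPL
 * field asking that registrations persist through power loss.
 */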
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
        bool send)
{
    struct nvme_ctrl *ctrl = data;
    struct nvme_command cmd;

    memset(&cmd, 0, sizeof(cmd));
    if (send)
        cmd.common.opcode = nvme_admin_security_send;
    else
        cmd.common.opcode = nvme_admin_security_recv;
    cmd.common.nsid = 0;
    cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
    cmd.common.cdw11 = cpu_to_le32(len);

    return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
            NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
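/*
 * nvme_sec_submit() above is the transport hook handed to the block
 * layer's SED-Opal core (via init_opal_dev() in the transport drivers, as
 * far as we can tell): the security protocol (SECP) is placed in bits
 * 31:24 of cdw10, the protocol-specific field (SPSP) in bits 23:8, and the
 * transfer length in cdw11, matching the Security Send/Receive admin
 * commands.
 */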
static const struct block_device_operations nvme_bdev_ops = {
    .owner          = THIS_MODULE,
    .ioctl          = nvme_ioctl,
    .compat_ioctl   = nvme_compat_ioctl,
    .release        = nvme_release,
    .getgeo         = nvme_getgeo,
    .report_zones   = nvme_report_zones,
    .pr_ops         = &nvme_pr_ops,
};
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
    struct nvme_ns_head *head = bdev->bd_disk->private_data;

    if (!kref_get_unless_zero(&head->ref))
        return -ENXIO;
    return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
    nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
    .owner          = THIS_MODULE,
    .submit_bio     = nvme_ns_head_submit_bio,
    .open           = nvme_ns_head_open,
    .release        = nvme_ns_head_release,
    .ioctl          = nvme_ioctl,
    .compat_ioctl   = nvme_compat_ioctl,
    .getgeo         = nvme_getgeo,
    .report_zones   = nvme_report_zones,
    .pr_ops         = &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
    unsigned long timeout =
        ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
    u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
    int ret;

    while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
        if ((csts & NVME_CSTS_RDY) == bit)
            break;

        usleep_range(1000, 2000);
        if (fatal_signal_pending(current))
            return -EINTR;
        if (time_after(jiffies, timeout)) {
            dev_err(ctrl->device,
                "Device not ready; aborting %s, CSTS=0x%x\n",
                enabled ? "initialisation" : "reset", csts);
            return -ENODEV;
        }
    }

    return ret;
}
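/*
 * The timeout computed in nvme_wait_ready() reflects that CAP.TO is
 * reported in 500 millisecond units: (CAP.TO + 1) * HZ / 2 converts it to
 * jiffies before CSTS.RDY is polled every millisecond or two.
 */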
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
    int ret;

    ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
    ctrl->ctrl_config &= ~NVME_CC_ENABLE;

    ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
    if (ret)
        return ret;

    if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
        msleep(NVME_QUIRK_DELAY_AMOUNT);

    return nvme_wait_ready(ctrl, ctrl->cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
    unsigned dev_page_min;
    int ret;

    ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
    if (ret) {
        dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
        return ret;
    }
    dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

    if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
        dev_err(ctrl->device,
            "Minimum device page size %u too large for host (%u)\n",
            1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
        return -ENODEV;
    }

    if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
        ctrl->ctrl_config = NVME_CC_CSS_CSI;
    else
        ctrl->ctrl_config = NVME_CC_CSS_NVM;
    ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
    ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
    ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
    ctrl->ctrl_config |= NVME_CC_ENABLE;

    ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
    if (ret)
        return ret;
    return nvme_wait_ready(ctrl, ctrl->cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
    unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
    u32 csts;
    int ret;

    ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
    ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

    ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
    if (ret)
        return ret;

    while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
        if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
            break;

        msleep(100);
        if (fatal_signal_pending(current))
            return -EINTR;
        if (time_after(jiffies, timeout)) {
            dev_err(ctrl->device,
                "Device shutdown incomplete; abort shutdown\n");
            return -ENODEV;
        }
    }

    return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
    __le64 ts;
    int ret;

    if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
        return 0;

    ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
    ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
            NULL);
    if (ret)
        dev_warn_once(ctrl->device,
            "could not set timestamp (%d)\n", ret);
    return ret;
}

static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
    struct nvme_feat_host_behavior *host;
    int ret;

    /* Don't bother enabling the feature if retry delay is not reported */
    if (!ctrl->crdt[0])
        return 0;

    host = kzalloc(sizeof(*host), GFP_KERNEL);
    if (!host)
        return 0;

    host->acre = NVME_ENABLE_ACRE;
    ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
            host, sizeof(*host), NULL);
    kfree(host);
    return ret;
}
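/*
 * ACRE (Advanced Command Retry Enable) is only useful when the controller
 * reports non-zero Command Retry Delay times (CRDT1-3) in Identify
 * Controller; those are the delays the host is expected to honour when a
 * command completes with a status asking for a delayed retry, which is why
 * nvme_configure_acre() bails out early when no retry delay is reported.
 */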
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
    /*
     * APST (Autonomous Power State Transition) lets us program a
     * table of power state transitions that the controller will
     * perform automatically.  We configure it with a simple
     * heuristic: we are willing to spend at most 2% of the time
     * transitioning between power states.  Therefore, when running
     * in any given state, we will enter the next lower-power
     * non-operational state after waiting 50 * (enlat + exlat)
     * microseconds, as long as that state's exit latency is under
     * the requested maximum latency.
     *
     * We will not autonomously enter any non-operational state for
     * which the total latency exceeds ps_max_latency_us.  Users
     * can set ps_max_latency_us to zero to turn off APST.
     */

    unsigned apste;
    struct nvme_feat_auto_pst *table;
    u64 max_lat_us = 0;
    int max_ps = -1;
    int ret;

    /*
     * If APST isn't supported or if we haven't been initialized yet,
     * then don't do anything.
     */
    if (!ctrl->apsta)
        return 0;

    if (ctrl->npss > 31) {
        dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
        return 0;
    }

    table = kzalloc(sizeof(*table), GFP_KERNEL);
    if (!table)
        return 0;

    if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
        /* Turn off APST. */
        apste = 0;
        dev_dbg(ctrl->device, "APST disabled\n");
    } else {
        __le64 target = cpu_to_le64(0);
        int state;

        /*
         * Walk through all states from lowest- to highest-power.
         * According to the spec, lower-numbered states use more
         * power.  NPSS, despite the name, is the index of the
         * lowest-power state, not the number of states.
         */
        for (state = (int)ctrl->npss; state >= 0; state--) {
            u64 total_latency_us, exit_latency_us, transition_ms;

            table->entries[state] = target;

            /*
             * Don't allow transitions to the deepest state
             * if it's quirked off.
             */
            if (state == ctrl->npss &&
                (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
                continue;

            /*
             * Is this state a useful non-operational state for
             * higher-power states to autonomously transition to?
             */
            if (!(ctrl->psd[state].flags &
                  NVME_PS_FLAGS_NON_OP_STATE))
                continue;

            exit_latency_us =
                (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
            if (exit_latency_us > ctrl->ps_max_latency_us)
                continue;

            total_latency_us =
                exit_latency_us +
                le32_to_cpu(ctrl->psd[state].entry_lat);

            /*
             * This state is good.  Use it as the APST idle
             * target for higher power states.
             */
            transition_ms = total_latency_us + 19;
            do_div(transition_ms, 20);
            if (transition_ms > (1 << 24) - 1)
                transition_ms = (1 << 24) - 1;

            target = cpu_to_le64((state << 3) |
                                 (transition_ms << 8));

            if (max_ps == -1)
                max_ps = state;

            if (total_latency_us > max_lat_us)
                max_lat_us = total_latency_us;
        }

        apste = 1;

        if (max_ps == -1)
            dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
        else
            dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
                max_ps, max_lat_us, (int)sizeof(*table), table);
    }

    ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
                table, sizeof(*table), NULL);
    if (ret)
        dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

    kfree(table);
    return ret;
}
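/*
 * Worked example of the heuristic above (illustrative numbers only): a
 * non-operational state with entry latency 5000us and exit latency 10000us
 * has total_latency_us = 15000, so higher-power states are programmed with
 * an idle timeout of (15000 + 19) / 20 = 750ms, i.e. roughly
 * 50 * (enlat + exlat), keeping the time spent transitioning near 2%.  The
 * state is only eligible at all if its 10000us exit latency is within
 * ps_max_latency_us.
 */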
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
    struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
    u64 latency;

    switch (val) {
    case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
    case PM_QOS_LATENCY_ANY:
        latency = U64_MAX;
        break;
    default:
        latency = val;
    }

    if (ctrl->ps_max_latency_us != latency) {
        ctrl->ps_max_latency_us = latency;
        nvme_configure_apst(ctrl);
    }
}
struct nvme_core_quirk_entry {
    /*
     * NVMe model and firmware strings are padded with spaces.  For
     * simplicity, strings in the quirk table are padded with NULLs
     * instead.
     */
    u16 vid;
    const char *mn;
    const char *fr;
    unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
    {
        /*
         * This Toshiba device seems to die using any APST states.  See:
         * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
         */
        .mn = "THNSF5256GPUK TOSHIBA",
        .quirks = NVME_QUIRK_NO_APST,
    },
    {
        /*
         * This LiteON CL1-3D*-Q11 firmware version has a race
         * condition associated with actions related to suspend to idle.
         * LiteON has resolved the problem in future firmware.
         */
        .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
    }
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
    size_t matchlen;

    if (!match)
        return true;

    matchlen = strlen(match);
    WARN_ON_ONCE(matchlen > len);

    if (memcmp(idstr, match, matchlen))
        return false;

    for (; matchlen < len; matchlen++)
        if (idstr[matchlen] != ' ')
            return false;

    return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
                          const struct nvme_core_quirk_entry *q)
{
    return q->vid == le16_to_cpu(id->vid) &&
        string_matches(id->mn, q->mn, sizeof(id->mn)) &&
        string_matches(id->fr, q->fr, sizeof(id->fr));
}
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
        struct nvme_id_ctrl *id)
{
    size_t nqnlen;
    int off;

    if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
        nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
        if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
            strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
            return;
        }

        if (ctrl->vs >= NVME_VS(1, 2, 1))
            dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
    }

    /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
    off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
            "nqn.2014.08.org.nvmexpress:%04x%04x",
            le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
    memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
    off += sizeof(id->sn);
    memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
    off += sizeof(id->mn);
    memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
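/*
 * The generated fallback NQN therefore looks like
 * "nqn.2014.08.org.nvmexpress:<vid><ssvid><sn><mn>", e.g. (illustrative
 * values only) a vid/ssvid of 0x8086 would yield a string starting with
 * "nqn.2014.08.org.nvmexpress:80868086" followed by the space-padded
 * serial number and model string copied straight from Identify Controller.
 */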
static void nvme_release_subsystem(struct device *dev)
{
    struct nvme_subsystem *subsys =
        container_of(dev, struct nvme_subsystem, dev);

    if (subsys->instance >= 0)
        ida_simple_remove(&nvme_instance_ida, subsys->instance);
    kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
    struct nvme_subsystem *subsys =
        container_of(ref, struct nvme_subsystem, ref);

    mutex_lock(&nvme_subsystems_lock);
    list_del(&subsys->entry);
    mutex_unlock(&nvme_subsystems_lock);

    ida_destroy(&subsys->ns_ida);
    device_del(&subsys->dev);
    put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
    kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
    struct nvme_subsystem *subsys;

    lockdep_assert_held(&nvme_subsystems_lock);

    /*
     * Fail matches for discovery subsystems. This results in each
     * discovery controller being bound to a unique subsystem.
     * This avoids issues with validating controller values
     * that can only be true when there is a single unique subsystem.
     * There may be multiple and completely independent entities
     * that provide discovery controllers.
     */
    if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
        return NULL;

    list_for_each_entry(subsys, &nvme_subsystems, entry) {
        if (strcmp(subsys->subnqn, subsysnqn))
            continue;
        if (!kref_get_unless_zero(&subsys->ref))
            continue;
        return subsys;
    }

    return NULL;
}
2831 #define SUBSYS_ATTR_RO(_name, _mode, _show) \
2832 struct device_attribute subsys_attr_##_name = \
2833 __ATTR(_name, _mode, _show, NULL)
2835 static ssize_t
nvme_subsys_show_nqn(struct device
*dev
,
2836 struct device_attribute
*attr
,
2839 struct nvme_subsystem
*subsys
=
2840 container_of(dev
, struct nvme_subsystem
, dev
);
2842 return snprintf(buf
, PAGE_SIZE
, "%s\n", subsys
->subnqn
);
2844 static SUBSYS_ATTR_RO(subsysnqn
, S_IRUGO
, nvme_subsys_show_nqn
);
2846 #define nvme_subsys_show_str_function(field) \
2847 static ssize_t subsys_##field##_show(struct device *dev, \
2848 struct device_attribute *attr, char *buf) \
2850 struct nvme_subsystem *subsys = \
2851 container_of(dev, struct nvme_subsystem, dev); \
2852 return sprintf(buf, "%.*s\n", \
2853 (int)sizeof(subsys->field), subsys->field); \
2855 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2857 nvme_subsys_show_str_function(model
);
2858 nvme_subsys_show_str_function(serial
);
2859 nvme_subsys_show_str_function(firmware_rev
);
2861 static struct attribute
*nvme_subsys_attrs
[] = {
2862 &subsys_attr_model
.attr
,
2863 &subsys_attr_serial
.attr
,
2864 &subsys_attr_firmware_rev
.attr
,
2865 &subsys_attr_subsysnqn
.attr
,
2866 #ifdef CONFIG_NVME_MULTIPATH
2867 &subsys_attr_iopolicy
.attr
,
2872 static struct attribute_group nvme_subsys_attrs_group
= {
2873 .attrs
= nvme_subsys_attrs
,
2876 static const struct attribute_group
*nvme_subsys_attrs_groups
[] = {
2877 &nvme_subsys_attrs_group
,
2881 static inline bool nvme_discovery_ctrl(struct nvme_ctrl
*ctrl
)
2883 return ctrl
->opts
&& ctrl
->opts
->discovery_nqn
;
2886 static bool nvme_validate_cntlid(struct nvme_subsystem
*subsys
,
2887 struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
2889 struct nvme_ctrl
*tmp
;
2891 lockdep_assert_held(&nvme_subsystems_lock
);
2893 list_for_each_entry(tmp
, &subsys
->ctrls
, subsys_entry
) {
2894 if (nvme_state_terminal(tmp
))
2897 if (tmp
->cntlid
== ctrl
->cntlid
) {
2898 dev_err(ctrl
->device
,
2899 "Duplicate cntlid %u with %s, rejecting\n",
2900 ctrl
->cntlid
, dev_name(tmp
->device
));
2904 if ((id
->cmic
& NVME_CTRL_CMIC_MULTI_CTRL
) ||
2905 nvme_discovery_ctrl(ctrl
))
2908 dev_err(ctrl
->device
,
2909 "Subsystem does not support multiple controllers\n");
2916 static int nvme_init_subsystem(struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
2918 struct nvme_subsystem
*subsys
, *found
;
2921 subsys
= kzalloc(sizeof(*subsys
), GFP_KERNEL
);
2925 subsys
->instance
= -1;
2926 mutex_init(&subsys
->lock
);
2927 kref_init(&subsys
->ref
);
2928 INIT_LIST_HEAD(&subsys
->ctrls
);
2929 INIT_LIST_HEAD(&subsys
->nsheads
);
2930 nvme_init_subnqn(subsys
, ctrl
, id
);
2931 memcpy(subsys
->serial
, id
->sn
, sizeof(subsys
->serial
));
2932 memcpy(subsys
->model
, id
->mn
, sizeof(subsys
->model
));
2933 memcpy(subsys
->firmware_rev
, id
->fr
, sizeof(subsys
->firmware_rev
));
2934 subsys
->vendor_id
= le16_to_cpu(id
->vid
);
2935 subsys
->cmic
= id
->cmic
;
2936 subsys
->awupf
= le16_to_cpu(id
->awupf
);
2937 #ifdef CONFIG_NVME_MULTIPATH
2938 subsys
->iopolicy
= NVME_IOPOLICY_NUMA
;
2941 subsys
->dev
.class = nvme_subsys_class
;
2942 subsys
->dev
.release
= nvme_release_subsystem
;
2943 subsys
->dev
.groups
= nvme_subsys_attrs_groups
;
2944 dev_set_name(&subsys
->dev
, "nvme-subsys%d", ctrl
->instance
);
2945 device_initialize(&subsys
->dev
);
2947 mutex_lock(&nvme_subsystems_lock
);
2948 found
= __nvme_find_get_subsystem(subsys
->subnqn
);
2950 put_device(&subsys
->dev
);
2953 if (!nvme_validate_cntlid(subsys
, ctrl
, id
)) {
2955 goto out_put_subsystem
;
2958 ret
= device_add(&subsys
->dev
);
2960 dev_err(ctrl
->device
,
2961 "failed to register subsystem device.\n");
2962 put_device(&subsys
->dev
);
2965 ida_init(&subsys
->ns_ida
);
2966 list_add_tail(&subsys
->entry
, &nvme_subsystems
);
2969 ret
= sysfs_create_link(&subsys
->dev
.kobj
, &ctrl
->device
->kobj
,
2970 dev_name(ctrl
->device
));
2972 dev_err(ctrl
->device
,
2973 "failed to create sysfs link from subsystem.\n");
2974 goto out_put_subsystem
;
2978 subsys
->instance
= ctrl
->instance
;
2979 ctrl
->subsys
= subsys
;
2980 list_add_tail(&ctrl
->subsys_entry
, &subsys
->ctrls
);
2981 mutex_unlock(&nvme_subsystems_lock
);
2985 nvme_put_subsystem(subsys
);
2987 mutex_unlock(&nvme_subsystems_lock
);
2991 int nvme_get_log(struct nvme_ctrl
*ctrl
, u32 nsid
, u8 log_page
, u8 lsp
, u8 csi
,
2992 void *log
, size_t size
, u64 offset
)
2994 struct nvme_command c
= { };
2995 u32 dwlen
= nvme_bytes_to_numd(size
);
2997 c
.get_log_page
.opcode
= nvme_admin_get_log_page
;
2998 c
.get_log_page
.nsid
= cpu_to_le32(nsid
);
2999 c
.get_log_page
.lid
= log_page
;
3000 c
.get_log_page
.lsp
= lsp
;
3001 c
.get_log_page
.numdl
= cpu_to_le16(dwlen
& ((1 << 16) - 1));
3002 c
.get_log_page
.numdu
= cpu_to_le16(dwlen
>> 16);
3003 c
.get_log_page
.lpol
= cpu_to_le32(lower_32_bits(offset
));
3004 c
.get_log_page
.lpou
= cpu_to_le32(upper_32_bits(offset
));
3005 c
.get_log_page
.csi
= csi
;
3007 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, log
, size
);
3010 static int nvme_get_effects_log(struct nvme_ctrl
*ctrl
, u8 csi
,
3011 struct nvme_effects_log
**log
)
3013 struct nvme_effects_log
*cel
= xa_load(&ctrl
->cels
, csi
);
3019 cel
= kzalloc(sizeof(*cel
), GFP_KERNEL
);
3023 ret
= nvme_get_log(ctrl
, 0x00, NVME_LOG_CMD_EFFECTS
, 0, csi
,
3024 cel
, sizeof(*cel
), 0);
3030 xa_store(&ctrl
->cels
, csi
, cel
, GFP_KERNEL
);
/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
3041 int nvme_init_identify(struct nvme_ctrl
*ctrl
)
3043 struct nvme_id_ctrl
*id
;
3044 int ret
, page_shift
;
3046 bool prev_apst_enabled
;
3048 ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_VS
, &ctrl
->vs
);
3050 dev_err(ctrl
->device
, "Reading VS failed (%d)\n", ret
);
3053 page_shift
= NVME_CAP_MPSMIN(ctrl
->cap
) + 12;
3054 ctrl
->sqsize
= min_t(u16
, NVME_CAP_MQES(ctrl
->cap
), ctrl
->sqsize
);
3056 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
3057 ctrl
->subsystem
= NVME_CAP_NSSRC(ctrl
->cap
);
3059 ret
= nvme_identify_ctrl(ctrl
, &id
);
3061 dev_err(ctrl
->device
, "Identify Controller failed (%d)\n", ret
);
3065 if (id
->lpa
& NVME_CTRL_LPA_CMD_EFFECTS_LOG
) {
3066 ret
= nvme_get_effects_log(ctrl
, NVME_CSI_NVM
, &ctrl
->effects
);
3071 if (!(ctrl
->ops
->flags
& NVME_F_FABRICS
))
3072 ctrl
->cntlid
= le16_to_cpu(id
->cntlid
);
3074 if (!ctrl
->identified
) {
3077 ret
= nvme_init_subsystem(ctrl
, id
);
3082 * Check for quirks. Quirk can depend on firmware version,
3083 * so, in principle, the set of quirks present can change
3084 * across a reset. As a possible future enhancement, we
3085 * could re-scan for quirks every time we reinitialize
3086 * the device, but we'd have to make sure that the driver
3087 * behaves intelligently if the quirks change.
3089 for (i
= 0; i
< ARRAY_SIZE(core_quirks
); i
++) {
3090 if (quirk_matches(id
, &core_quirks
[i
]))
3091 ctrl
->quirks
|= core_quirks
[i
].quirks
;
3095 if (force_apst
&& (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
)) {
3096 dev_warn(ctrl
->device
, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3097 ctrl
->quirks
&= ~NVME_QUIRK_NO_DEEPEST_PS
;
3100 ctrl
->crdt
[0] = le16_to_cpu(id
->crdt1
);
3101 ctrl
->crdt
[1] = le16_to_cpu(id
->crdt2
);
3102 ctrl
->crdt
[2] = le16_to_cpu(id
->crdt3
);
3104 ctrl
->oacs
= le16_to_cpu(id
->oacs
);
3105 ctrl
->oncs
= le16_to_cpu(id
->oncs
);
3106 ctrl
->mtfa
= le16_to_cpu(id
->mtfa
);
3107 ctrl
->oaes
= le32_to_cpu(id
->oaes
);
3108 ctrl
->wctemp
= le16_to_cpu(id
->wctemp
);
3109 ctrl
->cctemp
= le16_to_cpu(id
->cctemp
);
3111 atomic_set(&ctrl
->abort_limit
, id
->acl
+ 1);
3112 ctrl
->vwc
= id
->vwc
;
3114 max_hw_sectors
= 1 << (id
->mdts
+ page_shift
- 9);
3116 max_hw_sectors
= UINT_MAX
;
3117 ctrl
->max_hw_sectors
=
3118 min_not_zero(ctrl
->max_hw_sectors
, max_hw_sectors
);
3120 nvme_set_queue_limits(ctrl
, ctrl
->admin_q
);
3121 ctrl
->sgls
= le32_to_cpu(id
->sgls
);
3122 ctrl
->kas
= le16_to_cpu(id
->kas
);
3123 ctrl
->max_namespaces
= le32_to_cpu(id
->mnan
);
3124 ctrl
->ctratt
= le32_to_cpu(id
->ctratt
);
3128 u32 transition_time
= le32_to_cpu(id
->rtd3e
) / USEC_PER_SEC
;
3130 ctrl
->shutdown_timeout
= clamp_t(unsigned int, transition_time
,
3131 shutdown_timeout
, 60);
3133 if (ctrl
->shutdown_timeout
!= shutdown_timeout
)
3134 dev_info(ctrl
->device
,
3135 "Shutdown timeout set to %u seconds\n",
3136 ctrl
->shutdown_timeout
);
3138 ctrl
->shutdown_timeout
= shutdown_timeout
;
3140 ctrl
->npss
= id
->npss
;
3141 ctrl
->apsta
= id
->apsta
;
3142 prev_apst_enabled
= ctrl
->apst_enabled
;
3143 if (ctrl
->quirks
& NVME_QUIRK_NO_APST
) {
3144 if (force_apst
&& id
->apsta
) {
3145 dev_warn(ctrl
->device
, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3146 ctrl
->apst_enabled
= true;
3148 ctrl
->apst_enabled
= false;
3151 ctrl
->apst_enabled
= id
->apsta
;
3153 memcpy(ctrl
->psd
, id
->psd
, sizeof(ctrl
->psd
));
3155 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
3156 ctrl
->icdoff
= le16_to_cpu(id
->icdoff
);
3157 ctrl
->ioccsz
= le32_to_cpu(id
->ioccsz
);
3158 ctrl
->iorcsz
= le32_to_cpu(id
->iorcsz
);
3159 ctrl
->maxcmd
= le16_to_cpu(id
->maxcmd
);
3162 * In fabrics we need to verify the cntlid matches the
3165 if (ctrl
->cntlid
!= le16_to_cpu(id
->cntlid
)) {
3166 dev_err(ctrl
->device
,
3167 "Mismatching cntlid: Connect %u vs Identify "
3169 ctrl
->cntlid
, le16_to_cpu(id
->cntlid
));
3174 if (!nvme_discovery_ctrl(ctrl
) && !ctrl
->kas
) {
3175 dev_err(ctrl
->device
,
3176 "keep-alive support is mandatory for fabrics\n");
3181 ctrl
->hmpre
= le32_to_cpu(id
->hmpre
);
3182 ctrl
->hmmin
= le32_to_cpu(id
->hmmin
);
3183 ctrl
->hmminds
= le32_to_cpu(id
->hmminds
);
3184 ctrl
->hmmaxd
= le16_to_cpu(id
->hmmaxd
);
3187 ret
= nvme_mpath_init(ctrl
, id
);
3193 if (ctrl
->apst_enabled
&& !prev_apst_enabled
)
3194 dev_pm_qos_expose_latency_tolerance(ctrl
->device
);
3195 else if (!ctrl
->apst_enabled
&& prev_apst_enabled
)
3196 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
3198 ret
= nvme_configure_apst(ctrl
);
3202 ret
= nvme_configure_timestamp(ctrl
);
3206 ret
= nvme_configure_directives(ctrl
);
3210 ret
= nvme_configure_acre(ctrl
);
3214 if (!ctrl
->identified
&& !nvme_discovery_ctrl(ctrl
)) {
3215 ret
= nvme_hwmon_init(ctrl
);
3220 ctrl
->identified
= true;
3228 EXPORT_SYMBOL_GPL(nvme_init_identify
);
3230 static int nvme_dev_open(struct inode
*inode
, struct file
*file
)
3232 struct nvme_ctrl
*ctrl
=
3233 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3235 switch (ctrl
->state
) {
3236 case NVME_CTRL_LIVE
:
3239 return -EWOULDBLOCK
;
3242 nvme_get_ctrl(ctrl
);
3243 if (!try_module_get(ctrl
->ops
->module
)) {
3244 nvme_put_ctrl(ctrl
);
3248 file
->private_data
= ctrl
;
3252 static int nvme_dev_release(struct inode
*inode
, struct file
*file
)
3254 struct nvme_ctrl
*ctrl
=
3255 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3257 module_put(ctrl
->ops
->module
);
3258 nvme_put_ctrl(ctrl
);
3262 static int nvme_dev_user_cmd(struct nvme_ctrl
*ctrl
, void __user
*argp
)
3267 down_read(&ctrl
->namespaces_rwsem
);
3268 if (list_empty(&ctrl
->namespaces
)) {
3273 ns
= list_first_entry(&ctrl
->namespaces
, struct nvme_ns
, list
);
3274 if (ns
!= list_last_entry(&ctrl
->namespaces
, struct nvme_ns
, list
)) {
3275 dev_warn(ctrl
->device
,
3276 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
3281 dev_warn(ctrl
->device
,
3282 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
3283 kref_get(&ns
->kref
);
3284 up_read(&ctrl
->namespaces_rwsem
);
3286 ret
= nvme_user_cmd(ctrl
, ns
, argp
);
3291 up_read(&ctrl
->namespaces_rwsem
);
3295 static long nvme_dev_ioctl(struct file
*file
, unsigned int cmd
,
3298 struct nvme_ctrl
*ctrl
= file
->private_data
;
3299 void __user
*argp
= (void __user
*)arg
;
3302 case NVME_IOCTL_ADMIN_CMD
:
3303 return nvme_user_cmd(ctrl
, NULL
, argp
);
3304 case NVME_IOCTL_ADMIN64_CMD
:
3305 return nvme_user_cmd64(ctrl
, NULL
, argp
);
3306 case NVME_IOCTL_IO_CMD
:
3307 return nvme_dev_user_cmd(ctrl
, argp
);
3308 case NVME_IOCTL_RESET
:
3309 dev_warn(ctrl
->device
, "resetting controller\n");
3310 return nvme_reset_ctrl_sync(ctrl
);
3311 case NVME_IOCTL_SUBSYS_RESET
:
3312 return nvme_reset_subsystem(ctrl
);
3313 case NVME_IOCTL_RESCAN
:
3314 nvme_queue_scan(ctrl
);
3321 static const struct file_operations nvme_dev_fops
= {
3322 .owner
= THIS_MODULE
,
3323 .open
= nvme_dev_open
,
3324 .release
= nvme_dev_release
,
3325 .unlocked_ioctl
= nvme_dev_ioctl
,
3326 .compat_ioctl
= compat_ptr_ioctl
,
3329 static ssize_t
nvme_sysfs_reset(struct device
*dev
,
3330 struct device_attribute
*attr
, const char *buf
,
3333 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3336 ret
= nvme_reset_ctrl_sync(ctrl
);
3341 static DEVICE_ATTR(reset_controller
, S_IWUSR
, NULL
, nvme_sysfs_reset
);
3343 static ssize_t
nvme_sysfs_rescan(struct device
*dev
,
3344 struct device_attribute
*attr
, const char *buf
,
3347 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3349 nvme_queue_scan(ctrl
);
3352 static DEVICE_ATTR(rescan_controller
, S_IWUSR
, NULL
, nvme_sysfs_rescan
);
3354 static inline struct nvme_ns_head
*dev_to_ns_head(struct device
*dev
)
3356 struct gendisk
*disk
= dev_to_disk(dev
);
3358 if (disk
->fops
== &nvme_bdev_ops
)
3359 return nvme_get_ns_from_dev(dev
)->head
;
3361 return disk
->private_data
;
3364 static ssize_t
wwid_show(struct device
*dev
, struct device_attribute
*attr
,
3367 struct nvme_ns_head
*head
= dev_to_ns_head(dev
);
3368 struct nvme_ns_ids
*ids
= &head
->ids
;
3369 struct nvme_subsystem
*subsys
= head
->subsys
;
3370 int serial_len
= sizeof(subsys
->serial
);
3371 int model_len
= sizeof(subsys
->model
);
3373 if (!uuid_is_null(&ids
->uuid
))
3374 return sprintf(buf
, "uuid.%pU\n", &ids
->uuid
);
3376 if (memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3377 return sprintf(buf
, "eui.%16phN\n", ids
->nguid
);
3379 if (memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
3380 return sprintf(buf
, "eui.%8phN\n", ids
->eui64
);
3382 while (serial_len
> 0 && (subsys
->serial
[serial_len
- 1] == ' ' ||
3383 subsys
->serial
[serial_len
- 1] == '\0'))
3385 while (model_len
> 0 && (subsys
->model
[model_len
- 1] == ' ' ||
3386 subsys
->model
[model_len
- 1] == '\0'))
3389 return sprintf(buf
, "nvme.%04x-%*phN-%*phN-%08x\n", subsys
->vendor_id
,
3390 serial_len
, subsys
->serial
, model_len
, subsys
->model
,
3393 static DEVICE_ATTR_RO(wwid
);
3395 static ssize_t
nguid_show(struct device
*dev
, struct device_attribute
*attr
,
3398 return sprintf(buf
, "%pU\n", dev_to_ns_head(dev
)->ids
.nguid
);
3400 static DEVICE_ATTR_RO(nguid
);
3402 static ssize_t
uuid_show(struct device
*dev
, struct device_attribute
*attr
,
3405 struct nvme_ns_ids
*ids
= &dev_to_ns_head(dev
)->ids
;
3407 /* For backward compatibility expose the NGUID to userspace if
3408 * we have no UUID set
3410 if (uuid_is_null(&ids
->uuid
)) {
3411 printk_ratelimited(KERN_WARNING
3412 "No UUID available providing old NGUID\n");
3413 return sprintf(buf
, "%pU\n", ids
->nguid
);
3415 return sprintf(buf
, "%pU\n", &ids
->uuid
);
3417 static DEVICE_ATTR_RO(uuid
);
3419 static ssize_t
eui_show(struct device
*dev
, struct device_attribute
*attr
,
3422 return sprintf(buf
, "%8ph\n", dev_to_ns_head(dev
)->ids
.eui64
);
3424 static DEVICE_ATTR_RO(eui
);
3426 static ssize_t
nsid_show(struct device
*dev
, struct device_attribute
*attr
,
3429 return sprintf(buf
, "%d\n", dev_to_ns_head(dev
)->ns_id
);
3431 static DEVICE_ATTR_RO(nsid
);
3433 static struct attribute
*nvme_ns_id_attrs
[] = {
3434 &dev_attr_wwid
.attr
,
3435 &dev_attr_uuid
.attr
,
3436 &dev_attr_nguid
.attr
,
3438 &dev_attr_nsid
.attr
,
3439 #ifdef CONFIG_NVME_MULTIPATH
3440 &dev_attr_ana_grpid
.attr
,
3441 &dev_attr_ana_state
.attr
,
3446 static umode_t
nvme_ns_id_attrs_are_visible(struct kobject
*kobj
,
3447 struct attribute
*a
, int n
)
3449 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3450 struct nvme_ns_ids
*ids
= &dev_to_ns_head(dev
)->ids
;
3452 if (a
== &dev_attr_uuid
.attr
) {
3453 if (uuid_is_null(&ids
->uuid
) &&
3454 !memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3457 if (a
== &dev_attr_nguid
.attr
) {
3458 if (!memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3461 if (a
== &dev_attr_eui
.attr
) {
3462 if (!memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
3465 #ifdef CONFIG_NVME_MULTIPATH
3466 if (a
== &dev_attr_ana_grpid
.attr
|| a
== &dev_attr_ana_state
.attr
) {
3467 if (dev_to_disk(dev
)->fops
!= &nvme_bdev_ops
) /* per-path attr */
3469 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev
)->ctrl
))
3476 static const struct attribute_group nvme_ns_id_attr_group
= {
3477 .attrs
= nvme_ns_id_attrs
,
3478 .is_visible
= nvme_ns_id_attrs_are_visible
,
3481 const struct attribute_group
*nvme_ns_id_attr_groups
[] = {
3482 &nvme_ns_id_attr_group
,
3484 &nvme_nvm_attr_group
,
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n",						\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
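/*
 * For reference, nvme_show_int_function(cntlid) above expands to roughly:
 *
 *	static ssize_t cntlid_show(struct device *dev,
 *				   struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 *		return sprintf(buf, "%d\n", ctrl->cntlid);
 *	}
 *	static DEVICE_ATTR(cntlid, S_IRUGO, cntlid_show, NULL);
 *
 * giving each controller a read-only "cntlid" attribute in sysfs.
 */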
3517 static ssize_t
nvme_sysfs_delete(struct device
*dev
,
3518 struct device_attribute
*attr
, const char *buf
,
3521 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3523 if (device_remove_file_self(dev
, attr
))
3524 nvme_delete_ctrl_sync(ctrl
);
3527 static DEVICE_ATTR(delete_controller
, S_IWUSR
, NULL
, nvme_sysfs_delete
);
3529 static ssize_t
nvme_sysfs_show_transport(struct device
*dev
,
3530 struct device_attribute
*attr
,
3533 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3535 return snprintf(buf
, PAGE_SIZE
, "%s\n", ctrl
->ops
->name
);
3537 static DEVICE_ATTR(transport
, S_IRUGO
, nvme_sysfs_show_transport
, NULL
);
3539 static ssize_t
nvme_sysfs_show_state(struct device
*dev
,
3540 struct device_attribute
*attr
,
3543 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3544 static const char *const state_name
[] = {
3545 [NVME_CTRL_NEW
] = "new",
3546 [NVME_CTRL_LIVE
] = "live",
3547 [NVME_CTRL_RESETTING
] = "resetting",
3548 [NVME_CTRL_CONNECTING
] = "connecting",
3549 [NVME_CTRL_DELETING
] = "deleting",
3550 [NVME_CTRL_DELETING_NOIO
]= "deleting (no IO)",
3551 [NVME_CTRL_DEAD
] = "dead",
3554 if ((unsigned)ctrl
->state
< ARRAY_SIZE(state_name
) &&
3555 state_name
[ctrl
->state
])
3556 return sprintf(buf
, "%s\n", state_name
[ctrl
->state
]);
3558 return sprintf(buf
, "unknown state\n");
3561 static DEVICE_ATTR(state
, S_IRUGO
, nvme_sysfs_show_state
, NULL
);
3563 static ssize_t
nvme_sysfs_show_subsysnqn(struct device
*dev
,
3564 struct device_attribute
*attr
,
3567 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3569 return snprintf(buf
, PAGE_SIZE
, "%s\n", ctrl
->subsys
->subnqn
);
3571 static DEVICE_ATTR(subsysnqn
, S_IRUGO
, nvme_sysfs_show_subsysnqn
, NULL
);
3573 static ssize_t
nvme_sysfs_show_hostnqn(struct device
*dev
,
3574 struct device_attribute
*attr
,
3577 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3579 return snprintf(buf
, PAGE_SIZE
, "%s\n", ctrl
->opts
->host
->nqn
);
3581 static DEVICE_ATTR(hostnqn
, S_IRUGO
, nvme_sysfs_show_hostnqn
, NULL
);
3583 static ssize_t
nvme_sysfs_show_hostid(struct device
*dev
,
3584 struct device_attribute
*attr
,
3587 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3589 return snprintf(buf
, PAGE_SIZE
, "%pU\n", &ctrl
->opts
->host
->id
);
3591 static DEVICE_ATTR(hostid
, S_IRUGO
, nvme_sysfs_show_hostid
, NULL
);
3593 static ssize_t
nvme_sysfs_show_address(struct device
*dev
,
3594 struct device_attribute
*attr
,
3597 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3599 return ctrl
->ops
->get_address(ctrl
, buf
, PAGE_SIZE
);
3601 static DEVICE_ATTR(address
, S_IRUGO
, nvme_sysfs_show_address
, NULL
);
3603 static ssize_t
nvme_ctrl_loss_tmo_show(struct device
*dev
,
3604 struct device_attribute
*attr
, char *buf
)
3606 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3607 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3609 if (ctrl
->opts
->max_reconnects
== -1)
3610 return sprintf(buf
, "off\n");
3611 return sprintf(buf
, "%d\n",
3612 opts
->max_reconnects
* opts
->reconnect_delay
);
3615 static ssize_t
nvme_ctrl_loss_tmo_store(struct device
*dev
,
3616 struct device_attribute
*attr
, const char *buf
, size_t count
)
3618 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3619 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3620 int ctrl_loss_tmo
, err
;
3622 err
= kstrtoint(buf
, 10, &ctrl_loss_tmo
);
3626 else if (ctrl_loss_tmo
< 0)
3627 opts
->max_reconnects
= -1;
3629 opts
->max_reconnects
= DIV_ROUND_UP(ctrl_loss_tmo
,
3630 opts
->reconnect_delay
);
3633 static DEVICE_ATTR(ctrl_loss_tmo
, S_IRUGO
| S_IWUSR
,
3634 nvme_ctrl_loss_tmo_show
, nvme_ctrl_loss_tmo_store
);
3636 static ssize_t
nvme_ctrl_reconnect_delay_show(struct device
*dev
,
3637 struct device_attribute
*attr
, char *buf
)
3639 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3641 if (ctrl
->opts
->reconnect_delay
== -1)
3642 return sprintf(buf
, "off\n");
3643 return sprintf(buf
, "%d\n", ctrl
->opts
->reconnect_delay
);
3646 static ssize_t
nvme_ctrl_reconnect_delay_store(struct device
*dev
,
3647 struct device_attribute
*attr
, const char *buf
, size_t count
)
3649 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3653 err
= kstrtou32(buf
, 10, &v
);
3657 ctrl
->opts
->reconnect_delay
= v
;
3660 static DEVICE_ATTR(reconnect_delay
, S_IRUGO
| S_IWUSR
,
3661 nvme_ctrl_reconnect_delay_show
, nvme_ctrl_reconnect_delay_store
);
3663 static struct attribute
*nvme_dev_attrs
[] = {
3664 &dev_attr_reset_controller
.attr
,
3665 &dev_attr_rescan_controller
.attr
,
3666 &dev_attr_model
.attr
,
3667 &dev_attr_serial
.attr
,
3668 &dev_attr_firmware_rev
.attr
,
3669 &dev_attr_cntlid
.attr
,
3670 &dev_attr_delete_controller
.attr
,
3671 &dev_attr_transport
.attr
,
3672 &dev_attr_subsysnqn
.attr
,
3673 &dev_attr_address
.attr
,
3674 &dev_attr_state
.attr
,
3675 &dev_attr_numa_node
.attr
,
3676 &dev_attr_queue_count
.attr
,
3677 &dev_attr_sqsize
.attr
,
3678 &dev_attr_hostnqn
.attr
,
3679 &dev_attr_hostid
.attr
,
3680 &dev_attr_ctrl_loss_tmo
.attr
,
3681 &dev_attr_reconnect_delay
.attr
,
3685 static umode_t
nvme_dev_attrs_are_visible(struct kobject
*kobj
,
3686 struct attribute
*a
, int n
)
3688 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3689 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3691 if (a
== &dev_attr_delete_controller
.attr
&& !ctrl
->ops
->delete_ctrl
)
3693 if (a
== &dev_attr_address
.attr
&& !ctrl
->ops
->get_address
)
3695 if (a
== &dev_attr_hostnqn
.attr
&& !ctrl
->opts
)
3697 if (a
== &dev_attr_hostid
.attr
&& !ctrl
->opts
)
3699 if (a
== &dev_attr_ctrl_loss_tmo
.attr
&& !ctrl
->opts
)
3701 if (a
== &dev_attr_reconnect_delay
.attr
&& !ctrl
->opts
)
3707 static struct attribute_group nvme_dev_attrs_group
= {
3708 .attrs
= nvme_dev_attrs
,
3709 .is_visible
= nvme_dev_attrs_are_visible
,
3712 static const struct attribute_group
*nvme_dev_attr_groups
[] = {
3713 &nvme_dev_attrs_group
,
3717 static struct nvme_ns_head
*nvme_find_ns_head(struct nvme_subsystem
*subsys
,
3720 struct nvme_ns_head
*h
;
3722 lockdep_assert_held(&subsys
->lock
);
3724 list_for_each_entry(h
, &subsys
->nsheads
, entry
) {
3725 if (h
->ns_id
== nsid
&& kref_get_unless_zero(&h
->ref
))
3732 static int __nvme_check_ids(struct nvme_subsystem
*subsys
,
3733 struct nvme_ns_head
*new)
3735 struct nvme_ns_head
*h
;
3737 lockdep_assert_held(&subsys
->lock
);
3739 list_for_each_entry(h
, &subsys
->nsheads
, entry
) {
3740 if (nvme_ns_ids_valid(&new->ids
) &&
3741 nvme_ns_ids_equal(&new->ids
, &h
->ids
))
3748 static struct nvme_ns_head
*nvme_alloc_ns_head(struct nvme_ctrl
*ctrl
,
3749 unsigned nsid
, struct nvme_ns_ids
*ids
)
3751 struct nvme_ns_head
*head
;
3752 size_t size
= sizeof(*head
);
3755 #ifdef CONFIG_NVME_MULTIPATH
3756 size
+= num_possible_nodes() * sizeof(struct nvme_ns
*);
3759 head
= kzalloc(size
, GFP_KERNEL
);
3762 ret
= ida_simple_get(&ctrl
->subsys
->ns_ida
, 1, 0, GFP_KERNEL
);
3765 head
->instance
= ret
;
3766 INIT_LIST_HEAD(&head
->list
);
3767 ret
= init_srcu_struct(&head
->srcu
);
3769 goto out_ida_remove
;
3770 head
->subsys
= ctrl
->subsys
;
3773 kref_init(&head
->ref
);
3775 ret
= __nvme_check_ids(ctrl
->subsys
, head
);
3777 dev_err(ctrl
->device
,
3778 "duplicate IDs for nsid %d\n", nsid
);
3779 goto out_cleanup_srcu
;
3782 if (head
->ids
.csi
) {
3783 ret
= nvme_get_effects_log(ctrl
, head
->ids
.csi
, &head
->effects
);
3785 goto out_cleanup_srcu
;
3787 head
->effects
= ctrl
->effects
;
3789 ret
= nvme_mpath_alloc_disk(ctrl
, head
);
3791 goto out_cleanup_srcu
;
3793 list_add_tail(&head
->entry
, &ctrl
->subsys
->nsheads
);
3795 kref_get(&ctrl
->subsys
->ref
);
3799 cleanup_srcu_struct(&head
->srcu
);
3801 ida_simple_remove(&ctrl
->subsys
->ns_ida
, head
->instance
);
3806 ret
= blk_status_to_errno(nvme_error_status(ret
));
3807 return ERR_PTR(ret
);
3810 static int nvme_init_ns_head(struct nvme_ns
*ns
, unsigned nsid
,
3811 struct nvme_ns_ids
*ids
, bool is_shared
)
3813 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
3814 struct nvme_ns_head
*head
= NULL
;
3817 mutex_lock(&ctrl
->subsys
->lock
);
3818 head
= nvme_find_ns_head(ctrl
->subsys
, nsid
);
3820 head
= nvme_alloc_ns_head(ctrl
, nsid
, ids
);
3822 ret
= PTR_ERR(head
);
3825 head
->shared
= is_shared
;
3828 if (!is_shared
|| !head
->shared
) {
3829 dev_err(ctrl
->device
,
3830 "Duplicate unshared namespace %d\n", nsid
);
3831 goto out_put_ns_head
;
3833 if (!nvme_ns_ids_equal(&head
->ids
, ids
)) {
3834 dev_err(ctrl
->device
,
3835 "IDs don't match for shared namespace %d\n",
3837 goto out_put_ns_head
;
3841 list_add_tail_rcu(&ns
->siblings
, &head
->list
);
3843 mutex_unlock(&ctrl
->subsys
->lock
);
3847 nvme_put_ns_head(head
);
3849 mutex_unlock(&ctrl
->subsys
->lock
);
3853 static int ns_cmp(void *priv
, struct list_head
*a
, struct list_head
*b
)
3855 struct nvme_ns
*nsa
= container_of(a
, struct nvme_ns
, list
);
3856 struct nvme_ns
*nsb
= container_of(b
, struct nvme_ns
, list
);
3858 return nsa
->head
->ns_id
- nsb
->head
->ns_id
;
3861 struct nvme_ns
*nvme_find_get_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
3863 struct nvme_ns
*ns
, *ret
= NULL
;
3865 down_read(&ctrl
->namespaces_rwsem
);
3866 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
3867 if (ns
->head
->ns_id
== nsid
) {
3868 if (!kref_get_unless_zero(&ns
->kref
))
3873 if (ns
->head
->ns_id
> nsid
)
3876 up_read(&ctrl
->namespaces_rwsem
);
3879 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns
, NVME_TARGET_PASSTHRU
);
3881 static void nvme_alloc_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
,
3882 struct nvme_ns_ids
*ids
)
3885 struct gendisk
*disk
;
3886 struct nvme_id_ns
*id
;
3887 char disk_name
[DISK_NAME_LEN
];
3888 int node
= ctrl
->numa_node
, flags
= GENHD_FL_EXT_DEVT
;
3890 if (nvme_identify_ns(ctrl
, nsid
, ids
, &id
))
3893 ns
= kzalloc_node(sizeof(*ns
), GFP_KERNEL
, node
);
3897 ns
->queue
= blk_mq_init_queue(ctrl
->tagset
);
3898 if (IS_ERR(ns
->queue
))
3901 if (ctrl
->opts
&& ctrl
->opts
->data_digest
)
3902 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES
, ns
->queue
);
3904 blk_queue_flag_set(QUEUE_FLAG_NONROT
, ns
->queue
);
3905 if (ctrl
->ops
->flags
& NVME_F_PCI_P2PDMA
)
3906 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA
, ns
->queue
);
3908 ns
->queue
->queuedata
= ns
;
3910 kref_init(&ns
->kref
);
3912 if (nvme_init_ns_head(ns
, nsid
, ids
, id
->nmic
& NVME_NS_NMIC_SHARED
))
3913 goto out_free_queue
;
3914 nvme_set_disk_name(disk_name
, ns
, ctrl
, &flags
);
3916 disk
= alloc_disk_node(0, node
);
3920 disk
->fops
= &nvme_bdev_ops
;
3921 disk
->private_data
= ns
;
3922 disk
->queue
= ns
->queue
;
3923 disk
->flags
= flags
;
3924 memcpy(disk
->disk_name
, disk_name
, DISK_NAME_LEN
);
3927 if (nvme_update_ns_info(ns
, id
))
3930 if ((ctrl
->quirks
& NVME_QUIRK_LIGHTNVM
) && id
->vs
[0] == 0x1) {
3931 if (nvme_nvm_register(ns
, disk_name
, node
)) {
3932 dev_warn(ctrl
->device
, "LightNVM init failure\n");
3937 down_write(&ctrl
->namespaces_rwsem
);
3938 list_add_tail(&ns
->list
, &ctrl
->namespaces
);
3939 up_write(&ctrl
->namespaces_rwsem
);
3941 nvme_get_ctrl(ctrl
);
3943 device_add_disk(ctrl
->device
, ns
->disk
, nvme_ns_id_attr_groups
);
3945 nvme_mpath_add_disk(ns
, id
);
3946 nvme_fault_inject_init(&ns
->fault_inject
, ns
->disk
->disk_name
);
3951 /* prevent double queue cleanup */
3952 ns
->disk
->queue
= NULL
;
3955 mutex_lock(&ctrl
->subsys
->lock
);
3956 list_del_rcu(&ns
->siblings
);
3957 if (list_empty(&ns
->head
->list
))
3958 list_del_init(&ns
->head
->entry
);
3959 mutex_unlock(&ctrl
->subsys
->lock
);
3960 nvme_put_ns_head(ns
->head
);
3962 blk_cleanup_queue(ns
->queue
);
3969 static void nvme_ns_remove(struct nvme_ns
*ns
)
3971 if (test_and_set_bit(NVME_NS_REMOVING
, &ns
->flags
))
3974 set_capacity(ns
->disk
, 0);
3975 nvme_fault_inject_fini(&ns
->fault_inject
);
3977 mutex_lock(&ns
->ctrl
->subsys
->lock
);
3978 list_del_rcu(&ns
->siblings
);
3979 if (list_empty(&ns
->head
->list
))
3980 list_del_init(&ns
->head
->entry
);
3981 mutex_unlock(&ns
->ctrl
->subsys
->lock
);
3983 synchronize_rcu(); /* guarantee not available in head->list */
3984 nvme_mpath_clear_current_path(ns
);
3985 synchronize_srcu(&ns
->head
->srcu
); /* wait for concurrent submissions */
3987 if (ns
->disk
->flags
& GENHD_FL_UP
) {
3988 del_gendisk(ns
->disk
);
3989 blk_cleanup_queue(ns
->queue
);
3990 if (blk_get_integrity(ns
->disk
))
3991 blk_integrity_unregister(ns
->disk
);
3994 down_write(&ns
->ctrl
->namespaces_rwsem
);
3995 list_del_init(&ns
->list
);
3996 up_write(&ns
->ctrl
->namespaces_rwsem
);
3998 nvme_mpath_check_last_path(ns
);
static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
    struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

    if (ns) {
        nvme_ns_remove(ns);
        nvme_put_ns(ns);
    }
}
4012 static void nvme_validate_ns(struct nvme_ns
*ns
, struct nvme_ns_ids
*ids
)
4014 struct nvme_id_ns
*id
;
4015 int ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
4017 if (test_bit(NVME_NS_DEAD
, &ns
->flags
))
4020 ret
= nvme_identify_ns(ns
->ctrl
, ns
->head
->ns_id
, ids
, &id
);
4024 ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
4025 if (!nvme_ns_ids_equal(&ns
->head
->ids
, ids
)) {
4026 dev_err(ns
->ctrl
->device
,
4027 "identifiers changed for nsid %d\n", ns
->head
->ns_id
);
4031 ret
= nvme_update_ns_info(ns
, id
);
4037 * Only remove the namespace if we got a fatal error back from the
4038 * device, otherwise ignore the error and just move on.
4040 * TODO: we should probably schedule a delayed retry here.
4042 if (ret
> 0 && (ret
& NVME_SC_DNR
))
4046 static void nvme_validate_or_alloc_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
4048 struct nvme_ns_ids ids
= { };
4051 if (nvme_identify_ns_descs(ctrl
, nsid
, &ids
))
4054 ns
= nvme_find_get_ns(ctrl
, nsid
);
4056 nvme_validate_ns(ns
, &ids
);
4063 nvme_alloc_ns(ctrl
, nsid
, &ids
);
4066 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED
)) {
4067 dev_warn(ctrl
->device
,
4068 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
4072 if (!nvme_multi_css(ctrl
)) {
4073 dev_warn(ctrl
->device
,
4074 "command set not reported for nsid: %d\n",
4078 nvme_alloc_ns(ctrl
, nsid
, &ids
);
4081 dev_warn(ctrl
->device
, "unknown csi %u for nsid %u\n",
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
        unsigned nsid)
{
    struct nvme_ns *ns, *next;
    LIST_HEAD(rm_list);

    down_write(&ctrl->namespaces_rwsem);
    list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
        if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
            list_move_tail(&ns->list, &rm_list);
    }
    up_write(&ctrl->namespaces_rwsem);

    list_for_each_entry_safe(ns, next, &rm_list, list)
        nvme_ns_remove(ns);
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
    const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
    __le32 *ns_list;
    u32 prev = 0;
    int ret = 0, i;

    if (nvme_ctrl_limited_cns(ctrl))
        return -EOPNOTSUPP;

    ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
    if (!ns_list)
        return -ENOMEM;

    for (;;) {
        struct nvme_command cmd = {
            .identify.opcode    = nvme_admin_identify,
            .identify.cns       = NVME_ID_CNS_NS_ACTIVE_LIST,
            .identify.nsid      = cpu_to_le32(prev),
        };

        ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
                NVME_IDENTIFY_DATA_SIZE);
        if (ret) {
            dev_warn(ctrl->device,
                "Identify NS List failed (status=0x%x)\n", ret);
            goto free;
        }

        for (i = 0; i < nr_entries; i++) {
            u32 nsid = le32_to_cpu(ns_list[i]);

            if (!nsid)	/* end of the list? */
                goto out;
            nvme_validate_or_alloc_ns(ctrl, nsid);
            while (++prev < nsid)
                nvme_ns_remove_by_nsid(ctrl, prev);
        }
    }
out:
    nvme_remove_invalid_namespaces(ctrl, prev);
free:
    kfree(ns_list);
    return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
    struct nvme_id_ctrl *id;
    u32 nn, i;

    if (nvme_identify_ctrl(ctrl, &id))
        return;
    nn = le32_to_cpu(id->nn);
    kfree(id);

    for (i = 1; i <= nn; i++)
        nvme_validate_or_alloc_ns(ctrl, i);

    nvme_remove_invalid_namespaces(ctrl, nn);
}
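/*
 * Sequential scanning is the fallback for controllers that cannot use the
 * Identify active-namespace-ID list (see the nvme_ctrl_limited_cns() check
 * in nvme_scan_ns_list() above): every nsid from 1 up to Identify
 * Controller's NN field is probed individually instead.
 */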
4167 static void nvme_clear_changed_ns_log(struct nvme_ctrl
*ctrl
)
4169 size_t log_size
= NVME_MAX_CHANGED_NAMESPACES
* sizeof(__le32
);
4173 log
= kzalloc(log_size
, GFP_KERNEL
);
4178 * We need to read the log to clear the AEN, but we don't want to rely
4179 * on it for the changed namespace information as userspace could have
4180 * raced with us in reading the log page, which could cause us to miss
4183 error
= nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_CHANGED_NS
, 0,
4184 NVME_CSI_NVM
, log
, log_size
, 0);
4186 dev_warn(ctrl
->device
,
4187 "reading changed ns log failed: %d\n", error
);
static void nvme_scan_work(struct work_struct *work)
{
    struct nvme_ctrl *ctrl =
        container_of(work, struct nvme_ctrl, scan_work);

    /* No tagset on a live ctrl means IO queues could not be created */
    if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
        return;

    if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
        dev_info(ctrl->device, "rescanning namespaces.\n");
        nvme_clear_changed_ns_log(ctrl);
    }

    mutex_lock(&ctrl->scan_lock);
    if (nvme_scan_ns_list(ctrl) != 0)
        nvme_scan_ns_sequential(ctrl);
    mutex_unlock(&ctrl->scan_lock);

    down_write(&ctrl->namespaces_rwsem);
    list_sort(NULL, &ctrl->namespaces, ns_cmp);
    up_write(&ctrl->namespaces_rwsem);
}
4217 * This function iterates the namespace list unlocked to allow recovery from
4218 * controller failure. It is up to the caller to ensure the namespace list is
4219 * not modified by scan work while this function is executing.
4221 void nvme_remove_namespaces(struct nvme_ctrl
*ctrl
)
4223 struct nvme_ns
*ns
, *next
;
4227 * make sure to requeue I/O to all namespaces as these
4228 * might result from the scan itself and must complete
4229 * for the scan_work to make progress
4231 nvme_mpath_clear_ctrl_paths(ctrl
);
4233 /* prevent racing with ns scanning */
4234 flush_work(&ctrl
->scan_work
);
    /*
     * The dead state indicates the controller was not gracefully
     * disconnected. In that case, we won't be able to flush any data while
     * removing the namespaces' disks; fail all the queues now to avoid
     * potentially having to clean up the failed sync later.
     */
4242 if (ctrl
->state
== NVME_CTRL_DEAD
)
4243 nvme_kill_queues(ctrl
);
4245 /* this is a no-op when called from the controller reset handler */
4246 nvme_change_ctrl_state(ctrl
, NVME_CTRL_DELETING_NOIO
);
4248 down_write(&ctrl
->namespaces_rwsem
);
4249 list_splice_init(&ctrl
->namespaces
, &ns_list
);
4250 up_write(&ctrl
->namespaces_rwsem
);
4252 list_for_each_entry_safe(ns
, next
, &ns_list
, list
)
4255 EXPORT_SYMBOL_GPL(nvme_remove_namespaces
);
4257 static int nvme_class_uevent(struct device
*dev
, struct kobj_uevent_env
*env
)
4259 struct nvme_ctrl
*ctrl
=
4260 container_of(dev
, struct nvme_ctrl
, ctrl_device
);
4261 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
4264 ret
= add_uevent_var(env
, "NVME_TRTYPE=%s", ctrl
->ops
->name
);
4269 ret
= add_uevent_var(env
, "NVME_TRADDR=%s", opts
->traddr
);
4273 ret
= add_uevent_var(env
, "NVME_TRSVCID=%s",
4274 opts
->trsvcid
?: "none");
4278 ret
= add_uevent_var(env
, "NVME_HOST_TRADDR=%s",
4279 opts
->host_traddr
?: "none");
4284 static void nvme_aen_uevent(struct nvme_ctrl
*ctrl
)
4286 char *envp
[2] = { NULL
, NULL
};
4287 u32 aen_result
= ctrl
->aen_result
;
4289 ctrl
->aen_result
= 0;
4293 envp
[0] = kasprintf(GFP_KERNEL
, "NVME_AEN=%#08x", aen_result
);
4296 kobject_uevent_env(&ctrl
->device
->kobj
, KOBJ_CHANGE
, envp
);
4300 static void nvme_async_event_work(struct work_struct
*work
)
4302 struct nvme_ctrl
*ctrl
=
4303 container_of(work
, struct nvme_ctrl
, async_event_work
);
4305 nvme_aen_uevent(ctrl
);
4306 ctrl
->ops
->submit_async_event(ctrl
);
4309 static bool nvme_ctrl_pp_status(struct nvme_ctrl
*ctrl
)
4314 if (ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
))
4320 return ((ctrl
->ctrl_config
& NVME_CC_ENABLE
) && (csts
& NVME_CSTS_PP
));
4323 static void nvme_get_fw_slot_info(struct nvme_ctrl
*ctrl
)
4325 struct nvme_fw_slot_info_log
*log
;
4327 log
= kmalloc(sizeof(*log
), GFP_KERNEL
);
4331 if (nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_FW_SLOT
, 0, NVME_CSI_NVM
,
4332 log
, sizeof(*log
), 0))
4333 dev_warn(ctrl
->device
, "Get FW SLOT INFO log error\n");
4337 static void nvme_fw_act_work(struct work_struct
*work
)
4339 struct nvme_ctrl
*ctrl
= container_of(work
,
4340 struct nvme_ctrl
, fw_act_work
);
4341 unsigned long fw_act_timeout
;
4344 fw_act_timeout
= jiffies
+
4345 msecs_to_jiffies(ctrl
->mtfa
* 100);
4347 fw_act_timeout
= jiffies
+
4348 msecs_to_jiffies(admin_timeout
* 1000);
4350 nvme_stop_queues(ctrl
);
4351 while (nvme_ctrl_pp_status(ctrl
)) {
4352 if (time_after(jiffies
, fw_act_timeout
)) {
4353 dev_warn(ctrl
->device
,
4354 "Fw activation timeout, reset controller\n");
4355 nvme_try_sched_reset(ctrl
);
4361 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_LIVE
))
4364 nvme_start_queues(ctrl
);
4365 /* read FW slot information to clear the AER */
4366 nvme_get_fw_slot_info(ctrl
);
4369 static void nvme_handle_aen_notice(struct nvme_ctrl
*ctrl
, u32 result
)
4371 u32 aer_notice_type
= (result
& 0xff00) >> 8;
4373 trace_nvme_async_event(ctrl
, aer_notice_type
);
4375 switch (aer_notice_type
) {
4376 case NVME_AER_NOTICE_NS_CHANGED
:
4377 set_bit(NVME_AER_NOTICE_NS_CHANGED
, &ctrl
->events
);
4378 nvme_queue_scan(ctrl
);
4380 case NVME_AER_NOTICE_FW_ACT_STARTING
:
4382 * We are (ab)using the RESETTING state to prevent subsequent
4383 * recovery actions from interfering with the controller's
4384 * firmware activation.
4386 if (nvme_change_ctrl_state(ctrl
, NVME_CTRL_RESETTING
))
4387 queue_work(nvme_wq
, &ctrl
->fw_act_work
);
4389 #ifdef CONFIG_NVME_MULTIPATH
4390 case NVME_AER_NOTICE_ANA
:
4391 if (!ctrl
->ana_log_buf
)
4393 queue_work(nvme_wq
, &ctrl
->ana_work
);
4396 case NVME_AER_NOTICE_DISC_CHANGED
:
4397 ctrl
->aen_result
= result
;
4400 dev_warn(ctrl
->device
, "async event result %08x\n", result
);
4404 void nvme_complete_async_event(struct nvme_ctrl
*ctrl
, __le16 status
,
4405 volatile union nvme_result
*res
)
4407 u32 result
= le32_to_cpu(res
->u32
);
4408 u32 aer_type
= result
& 0x07;
4410 if (le16_to_cpu(status
) >> 1 != NVME_SC_SUCCESS
)
4414 case NVME_AER_NOTICE
:
4415 nvme_handle_aen_notice(ctrl
, result
);
4417 case NVME_AER_ERROR
:
4418 case NVME_AER_SMART
:
4421 trace_nvme_async_event(ctrl
, aer_type
);
4422 ctrl
->aen_result
= result
;
4427 queue_work(nvme_wq
, &ctrl
->async_event_work
);
4429 EXPORT_SYMBOL_GPL(nvme_complete_async_event
);
4431 void nvme_stop_ctrl(struct nvme_ctrl
*ctrl
)
4433 nvme_mpath_stop(ctrl
);
4434 nvme_stop_keep_alive(ctrl
);
4435 nvme_stop_failfast_work(ctrl
);
4436 flush_work(&ctrl
->async_event_work
);
4437 cancel_work_sync(&ctrl
->fw_act_work
);
4439 EXPORT_SYMBOL_GPL(nvme_stop_ctrl
);
4441 void nvme_start_ctrl(struct nvme_ctrl
*ctrl
)
4443 nvme_start_keep_alive(ctrl
);
4445 nvme_enable_aen(ctrl
);
4447 if (ctrl
->queue_count
> 1) {
4448 nvme_queue_scan(ctrl
);
4449 nvme_start_queues(ctrl
);
4452 EXPORT_SYMBOL_GPL(nvme_start_ctrl
);
4454 void nvme_uninit_ctrl(struct nvme_ctrl
*ctrl
)
4456 nvme_fault_inject_fini(&ctrl
->fault_inject
);
4457 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
4458 cdev_device_del(&ctrl
->cdev
, ctrl
->device
);
4459 nvme_put_ctrl(ctrl
);
4461 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl
);
4463 static void nvme_free_cels(struct nvme_ctrl
*ctrl
)
4465 struct nvme_effects_log
*cel
;
4468 xa_for_each (&ctrl
->cels
, i
, cel
) {
4469 xa_erase(&ctrl
->cels
, i
);
4473 xa_destroy(&ctrl
->cels
);
4476 static void nvme_free_ctrl(struct device
*dev
)
4478 struct nvme_ctrl
*ctrl
=
4479 container_of(dev
, struct nvme_ctrl
, ctrl_device
);
4480 struct nvme_subsystem
*subsys
= ctrl
->subsys
;
4482 if (!subsys
|| ctrl
->instance
!= subsys
->instance
)
4483 ida_simple_remove(&nvme_instance_ida
, ctrl
->instance
);
4485 nvme_free_cels(ctrl
);
4486 nvme_mpath_uninit(ctrl
);
4487 __free_page(ctrl
->discard_page
);
4490 mutex_lock(&nvme_subsystems_lock
);
4491 list_del(&ctrl
->subsys_entry
);
4492 sysfs_remove_link(&subsys
->dev
.kobj
, dev_name(ctrl
->device
));
4493 mutex_unlock(&nvme_subsystems_lock
);
4496 ctrl
->ops
->free_ctrl(ctrl
);
4499 nvme_put_subsystem(subsys
);
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
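/*
 * Illustrative sketch (not taken from this file): a transport driver usually
 * embeds struct nvme_ctrl in its own controller structure and calls
 * nvme_init_ctrl() early in its probe path, before touching the device.
 * The names my_dev and my_ctrl_ops below are hypothetical placeholders:
 *
 *	ret = nvme_init_ctrl(&my_dev->ctrl, &pdev->dev, &my_ctrl_ops, quirks);
 *	if (ret)
 *		goto out_free_dev;
 *	...
 *	nvme_uninit_ctrl(&my_dev->ctrl);	// on removal; the final put
 *						// releases via ->release
 */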
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
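/*
 * Illustrative sketch (hypothetical caller, not from this file): transports
 * invoke nvme_kill_queues() only once the controller is considered dead,
 * typically just before tearing the namespaces down:
 *
 *	if (controller_is_dead)
 *		nvme_kill_queues(ctrl);
 *	nvme_remove_namespaces(ctrl);
 */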
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
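/*
 * Illustrative sketch (hypothetical outline, not a verbatim caller): during a
 * controller reset a transport typically pairs the freeze helpers so that new
 * I/O is held back while the hardware queues are re-created:
 *
 *	nvme_start_freeze(ctrl);			 // stop new requests entering
 *	nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT); // wait for in-flight I/O
 *	// ...reset the controller, re-map and re-enable the queues...
 *	nvme_unfreeze(ctrl);				 // let I/O flow again
 */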
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
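/*
 * Illustrative sketch (hypothetical outline): quiesce/unquiesce are the
 * lighter-weight counterparts of the freeze helpers above; a transport
 * typically quiesces the namespace queues before disabling its hardware
 * queues and unquiesces them once the queues are usable again:
 *
 *	nvme_stop_queues(ctrl);		// hold dispatch at the blk-mq level
 *	// ...disable or delete the hardware queues...
 *	nvme_start_queues(ctrl);	// resume dispatch
 */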
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
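/*
 * Illustrative sketch (hypothetical, simplified): the NVMe target passthru
 * code imports the NVME_TARGET_PASSTHRU symbol namespace and uses this helper
 * to resolve a controller from a user-supplied file descriptor:
 *
 *	ctrl = nvme_ctrl_from_file(filp);
 *	if (!ctrl)
 *		return -EINVAL;		// not an nvme character device
 */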
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}
static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);