// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_mark_disk_dead(ns->disk);
	blk_mq_unquiesce_queue(ns->queue);

	set_capacity_and_notify(ns->disk, 0);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}
static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}
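
/*
 * Requeue a failed command, honouring any Command Retry Delay the controller
 * requested: the CRD field of the status selects one of the CRDT1-3 times,
 * which are specified in units of 100 milliseconds.
 */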
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
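
/*
 * Completion handling: decide whether a finished request should be completed
 * to its submitter, retried on the same path, or failed over to another path
 * when native multipathing is enabled.
 */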
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}
static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));

	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}
void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value back up.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}
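
/*
 * Initialize a freshly allocated passthrough request: pick the admin or I/O
 * timeout based on whether the queue has namespace data attached, clear the
 * nvme_request state and copy the caller's command into the request PDU.
 */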
static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_HIPRI;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. Until the controller is LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c = { };

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}
static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c = { };

	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}
/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
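
/*
 * Translate a discard request into a Dataset Management (deallocate) command,
 * packing one range descriptor per bio in the request.
 */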
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
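
/*
 * Write Zeroes shares a path with discard: controllers with the
 * NVME_QUIRK_DEALLOCATE_ZEROES quirk implement zeroing via DSM deallocate
 * rather than a native Write Zeroes command.
 */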
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	if (nvme_ns_has_pi(ns))
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
	else
		cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
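
/*
 * Translate a block layer request into the corresponding NVMe command:
 * flush, discard, write zeroes, zone management, or a plain read/write/zone
 * append.  Unknown operations are rejected with BLK_STS_IOERR.
 */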
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_clear_nvme_request(req);
		memset(cmd, 0, sizeof(*cmd));
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		nvme_req(req)->genctr++;
	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
			   bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(disk, rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(NULL, req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
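
/*
 * The two helpers below combine the effects the controller reports in its
 * Commands Supported and Effects log with effects the driver itself knows
 * about (e.g. Format NVM and Sanitize), so callers can fence passthrough
 * commands appropriately.
 */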
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep-alive interval on the host should be
			 * updated when KATO is modified by a Set Features
			 * command.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
int nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;
	int ret;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(disk, rq, false);
	if (effects) /* nothing to be done for zero cmd effects */
		nvme_passthru_end(ctrl, effects, cmd, ret);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
}
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
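
/*
 * A Set Features command that changes KATO carries the new timeout in
 * milliseconds in cdw11; convert it to seconds and restart keep-alive with
 * the new interval.
 */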
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
*ctrl
, struct nvme_ns_ids
*ids
,
1345 struct nvme_ns_id_desc
*cur
, bool *csi_seen
)
1347 const char *warn_str
= "ctrl returned bogus length:";
1350 switch (cur
->nidt
) {
1351 case NVME_NIDT_EUI64
:
1352 if (cur
->nidl
!= NVME_NIDT_EUI64_LEN
) {
1353 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_EUI64\n",
1354 warn_str
, cur
->nidl
);
1357 memcpy(ids
->eui64
, data
+ sizeof(*cur
), NVME_NIDT_EUI64_LEN
);
1358 return NVME_NIDT_EUI64_LEN
;
1359 case NVME_NIDT_NGUID
:
1360 if (cur
->nidl
!= NVME_NIDT_NGUID_LEN
) {
1361 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_NGUID\n",
1362 warn_str
, cur
->nidl
);
1365 memcpy(ids
->nguid
, data
+ sizeof(*cur
), NVME_NIDT_NGUID_LEN
);
1366 return NVME_NIDT_NGUID_LEN
;
1367 case NVME_NIDT_UUID
:
1368 if (cur
->nidl
!= NVME_NIDT_UUID_LEN
) {
1369 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_UUID\n",
1370 warn_str
, cur
->nidl
);
1373 uuid_copy(&ids
->uuid
, data
+ sizeof(*cur
));
1374 return NVME_NIDT_UUID_LEN
;
1376 if (cur
->nidl
!= NVME_NIDT_CSI_LEN
) {
1377 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_CSI\n",
1378 warn_str
, cur
->nidl
);
1381 memcpy(&ids
->csi
, data
+ sizeof(*cur
), NVME_NIDT_CSI_LEN
);
1383 return NVME_NIDT_CSI_LEN
;
1385 /* Skip unknown types */
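
/*
 * Issue Identify with CNS 03h and walk the Namespace Identification
 * Descriptor list, collecting the EUI-64, NGUID, UUID and command set
 * identifier (CSI) for the namespace.
 */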
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		goto out_free_id;
	}

	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

	return 0;

out_free_id:
	kfree(*id);
	return error;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);
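
/*
 * Negotiate the number of I/O queues via the Number of Queues feature.  The
 * requested and allocated counts are zero-based and packed into the two
 * halves of a dword, so the usable count is the smaller half plus one.
 */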
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
static int nvme_ns_open(struct nvme_ns *ns)
{
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{
	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}

	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}
static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return 0;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return -EINVAL;
		if (ctrl->max_integrity_segments)
			ns->features |=
				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}

	return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);

	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		    test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !disk_live(disk);
}
static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;

	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	ret = nvme_configure_metadata(ns, id);
	if (ret)
		goto out_unfreeze;
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret)
			goto out_unfreeze;
	}

	set_bit(NVME_NS_READY, &ns->flags);
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			return ret;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		disk_update_readahead(ns->head->disk);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
	return 0;

out_unfreeze:
	/*
	 * If probing fails due to an unsupported feature, hide the block
	 * device, but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		set_bit(NVME_NS_READY, &ns->flags);
		ret = 0;
	}
	blk_mq_unfreeze_queue(ns->disk->queue);
	return ret;
}
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}
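
/*
 * Build a persistent reservation command: the current and new/service-action
 * keys travel in a 16-byte payload as two little-endian u64s, while the
 * action itself is encoded in cdw10.  Multipath disks route the command
 * through an available path; plain namespaces send it directly.
 */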
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};
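
/*
 * Poll CSTS.RDY until it reaches the expected state.  CAP.TO is specified in
 * 500 ms units, which is where the ((CAP.TO + 1) * HZ / 2) timeout below
 * comes from.
 */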
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, ctrl->cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned dev_page_min;
	int ret;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
		return -ENODEV;
	}

	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, ctrl->cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
        unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
        u32 csts;
        int ret;

        ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
        ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

        ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
        if (ret)
                return ret;

        while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
                if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
                        break;

                msleep(100);
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
                        dev_err(ctrl->device,
                                "Device shutdown incomplete; abort shutdown\n");
                        return -ENODEV;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
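/*
 * The shutdown path above requests a normal shutdown via CC.SHN and then
 * polls CSTS.SHST until the controller reports shutdown complete.  The
 * ctrl->shutdown_timeout bound is derived later in this file from the
 * Identify Controller RTD3E field, clamped between the shutdown_timeout
 * module parameter and 60 seconds.
 */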
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
        __le64 ts;
        int ret;

        if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
                return 0;

        ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
        ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
                        NULL);
        if (ret)
                dev_warn_once(ctrl->device,
                        "could not set timestamp (%d)\n", ret);
        return ret;
}
static int nvme_configure_acre(struct nvme_ctrl *ctrl)
{
        struct nvme_feat_host_behavior *host;
        int ret;

        /* Don't bother enabling the feature if retry delay is not reported */
        if (!ctrl->crdt[0])
                return 0;

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return 0;

        host->acre = NVME_ENABLE_ACRE;
        ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
                                host, sizeof(*host), NULL);
        kfree(host);
        return ret;
}
/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
                u64 *transition_time, unsigned *last_index)
{
        if (total_latency <= apst_primary_latency_tol_us) {
                if (*last_index == 1)
                        return false;
                *last_index = 1;
                *transition_time = apst_primary_timeout_ms;
                return true;
        }
        if (apst_secondary_timeout_ms &&
                total_latency <= apst_secondary_latency_tol_us) {
                if (*last_index <= 2)
                        return false;
                *last_index = 2;
                *transition_time = apst_secondary_timeout_ms;
                return true;
        }
        return false;
}
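/*
 * The net effect in nvme_configure_apst() below: a state whose round-trip
 * latency fits the primary tolerance is given the (shorter) primary timeout,
 * one that only fits the secondary tolerance is given the longer secondary
 * timeout, and the *last_index bookkeeping ensures each tolerance class
 * contributes at most one transition target.
 */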
/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
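/*
 * Worked example of the 2% heuristic (illustrative numbers): a state with
 * enlat + exlat = 10,000 us gets an idle timeout of 50 * 10,000 us = 500 ms,
 * which is exactly what the transition_ms = (total_latency_us + 19) / 20
 * computation below produces (the result is in milliseconds, rounded up).
 */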
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
        struct nvme_feat_auto_pst *table;
        unsigned apste = 0;
        u64 max_lat_us = 0;
        __le64 target = 0;
        int max_ps = -1;
        int state;
        int ret;
        unsigned last_lt_index = UINT_MAX;

        /*
         * If APST isn't supported or if we haven't been initialized yet,
         * then don't do anything.
         */
        if (!ctrl->apsta)
                return 0;

        if (ctrl->npss > 31) {
                dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
                return 0;
        }

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return 0;

        if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
                /* Turn off APST. */
                dev_dbg(ctrl->device, "APST disabled\n");
                goto done;
        }

        /*
         * Walk through all states from lowest- to highest-power.
         * According to the spec, lower-numbered states use more power.  NPSS,
         * despite the name, is the index of the lowest-power state, not the
         * number of states.
         */
        for (state = (int)ctrl->npss; state >= 0; state--) {
                u64 total_latency_us, exit_latency_us, transition_ms;

                if (target)
                        table->entries[state] = target;

                /*
                 * Don't allow transitions to the deepest state if it's quirked
                 * off.
                 */
                if (state == ctrl->npss &&
                    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
                        continue;

                /*
                 * Is this state a useful non-operational state for higher-power
                 * states to autonomously transition to?
                 */
                if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
                        continue;

                exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
                if (exit_latency_us > ctrl->ps_max_latency_us)
                        continue;

                total_latency_us = exit_latency_us +
                        le32_to_cpu(ctrl->psd[state].entry_lat);

                /*
                 * This state is good. It can be used as the APST idle target
                 * for higher power states.
                 */
                if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
                        if (!nvme_apst_get_transition_time(total_latency_us,
                                        &transition_ms, &last_lt_index))
                                continue;
                } else {
                        transition_ms = total_latency_us + 19;
                        do_div(transition_ms, 20);
                        if (transition_ms > (1 << 24) - 1)
                                transition_ms = (1 << 24) - 1;
                }

                target = cpu_to_le64((state << 3) | (transition_ms << 8));
                if (max_ps == -1)
                        max_ps = state;
                if (total_latency_us > max_lat_us)
                        max_lat_us = total_latency_us;
        }

        if (max_ps == -1)
                dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
        else
                dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
                        max_ps, max_lat_us, (int)sizeof(*table), table);
        apste = 1;

done:
        ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
                                table, sizeof(*table), NULL);
        if (ret)
                dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
        kfree(table);
        return ret;
}
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
        u64 latency;

        switch (val) {
        case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
        case PM_QOS_LATENCY_ANY:
                latency = U64_MAX;
                break;
        default:
                latency = val;
        }

        if (ctrl->ps_max_latency_us != latency) {
                ctrl->ps_max_latency_us = latency;
                if (ctrl->state == NVME_CTRL_LIVE)
                        nvme_configure_apst(ctrl);
        }
}
struct nvme_core_quirk_entry {
        /*
         * NVMe model and firmware strings are padded with spaces.  For
         * simplicity, strings in the quirk table are padded with NULLs
         * instead.
         */
        u16 vid;
        const char *mn;
        const char *fr;
        unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
        {
                /*
                 * This Toshiba device seems to die using any APST states.  See:
                 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
                 */
                .mn = "THNSF5256GPUK TOSHIBA",
                .quirks = NVME_QUIRK_NO_APST,
        },
        {
                /*
                 * This LiteON CL1-3D*-Q11 firmware version has a race
                 * condition associated with actions related to suspend to idle
                 * LiteON has resolved the problem in future firmware
                 */
                .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
        },
};
/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
        size_t matchlen;

        if (!match)
                return true;

        matchlen = strlen(match);
        WARN_ON_ONCE(matchlen > len);

        if (memcmp(idstr, match, matchlen))
                return false;

        for (; matchlen < len; matchlen++)
                if (idstr[matchlen] != ' ')
                        return false;

        return true;
}
static bool quirk_matches(const struct nvme_id_ctrl *id,
                const struct nvme_core_quirk_entry *q)
{
        return q->vid == le16_to_cpu(id->vid) &&
                string_matches(id->mn, q->mn, sizeof(id->mn)) &&
                string_matches(id->fr, q->fr, sizeof(id->fr));
}
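/*
 * For example (hypothetical entry): a core_quirks[] element with
 * .mn = "FOO 123" matches a controller whose Identify model string is
 * "FOO 123" followed by spaces out to the full field width; string_matches()
 * tolerates the padding, and quirk_matches() additionally requires the PCI
 * vendor ID to agree.  A NULL .mn or .fr acts as a wildcard.
 */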
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
                struct nvme_id_ctrl *id)
{
        size_t nqnlen;
        int off;

        if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
                nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
                if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
                        strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
                        return;
                }

                if (ctrl->vs >= NVME_VS(1, 2, 1))
                        dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
        }

        /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
        off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
                        "nqn.2014.08.org.nvmexpress:%04x%04x",
                        le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
        memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
        off += sizeof(id->sn);
        memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
        off += sizeof(id->mn);
        memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
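/*
 * Illustrative example of the generated "fake" NQN (values hypothetical):
 * vid 0xabcd and ssvid 0x1234 produce the prefix
 * "nqn.2014.08.org.nvmexpress:abcd1234", followed by the raw (space-padded)
 * serial number and model number bytes from Identify Controller.
 */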
static void nvme_release_subsystem(struct device *dev)
{
        struct nvme_subsystem *subsys =
                container_of(dev, struct nvme_subsystem, dev);

        if (subsys->instance >= 0)
                ida_simple_remove(&nvme_instance_ida, subsys->instance);
        kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
        struct nvme_subsystem *subsys =
                        container_of(ref, struct nvme_subsystem, ref);

        mutex_lock(&nvme_subsystems_lock);
        list_del(&subsys->entry);
        mutex_unlock(&nvme_subsystems_lock);

        ida_destroy(&subsys->ns_ida);
        device_del(&subsys->dev);
        put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
        kref_put(&subsys->ref, nvme_destroy_subsystem);
}
2560 static struct nvme_subsystem
*__nvme_find_get_subsystem(const char *subsysnqn
)
2562 struct nvme_subsystem
*subsys
;
2564 lockdep_assert_held(&nvme_subsystems_lock
);
2567 * Fail matches for discovery subsystems. This results
2568 * in each discovery controller bound to a unique subsystem.
2569 * This avoids issues with validating controller values
2570 * that can only be true when there is a single unique subsystem.
2571 * There may be multiple and completely independent entities
2572 * that provide discovery controllers.
2574 if (!strcmp(subsysnqn
, NVME_DISC_SUBSYS_NAME
))
2577 list_for_each_entry(subsys
, &nvme_subsystems
, entry
) {
2578 if (strcmp(subsys
->subnqn
, subsysnqn
))
2580 if (!kref_get_unless_zero(&subsys
->ref
))
2588 #define SUBSYS_ATTR_RO(_name, _mode, _show) \
2589 struct device_attribute subsys_attr_##_name = \
2590 __ATTR(_name, _mode, _show, NULL)
2592 static ssize_t
nvme_subsys_show_nqn(struct device
*dev
,
2593 struct device_attribute
*attr
,
2596 struct nvme_subsystem
*subsys
=
2597 container_of(dev
, struct nvme_subsystem
, dev
);
2599 return sysfs_emit(buf
, "%s\n", subsys
->subnqn
);
2601 static SUBSYS_ATTR_RO(subsysnqn
, S_IRUGO
, nvme_subsys_show_nqn
);
2603 #define nvme_subsys_show_str_function(field) \
2604 static ssize_t subsys_##field##_show(struct device *dev, \
2605 struct device_attribute *attr, char *buf) \
2607 struct nvme_subsystem *subsys = \
2608 container_of(dev, struct nvme_subsystem, dev); \
2609 return sysfs_emit(buf, "%.*s\n", \
2610 (int)sizeof(subsys->field), subsys->field); \
2612 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2614 nvme_subsys_show_str_function(model
);
2615 nvme_subsys_show_str_function(serial
);
2616 nvme_subsys_show_str_function(firmware_rev
);
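/*
 * The nvme_subsys_show_str_function() invocations above generate read-only
 * sysfs attributes for the subsystem device, e.g. subsys_model_show() emits
 * the space-padded model string; they typically appear as the model, serial
 * and firmware_rev files under the nvme-subsystem class device in sysfs.
 */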
2618 static struct attribute
*nvme_subsys_attrs
[] = {
2619 &subsys_attr_model
.attr
,
2620 &subsys_attr_serial
.attr
,
2621 &subsys_attr_firmware_rev
.attr
,
2622 &subsys_attr_subsysnqn
.attr
,
2623 #ifdef CONFIG_NVME_MULTIPATH
2624 &subsys_attr_iopolicy
.attr
,
2629 static const struct attribute_group nvme_subsys_attrs_group
= {
2630 .attrs
= nvme_subsys_attrs
,
2633 static const struct attribute_group
*nvme_subsys_attrs_groups
[] = {
2634 &nvme_subsys_attrs_group
,
2638 static inline bool nvme_discovery_ctrl(struct nvme_ctrl
*ctrl
)
2640 return ctrl
->opts
&& ctrl
->opts
->discovery_nqn
;
2643 static bool nvme_validate_cntlid(struct nvme_subsystem
*subsys
,
2644 struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
2646 struct nvme_ctrl
*tmp
;
2648 lockdep_assert_held(&nvme_subsystems_lock
);
2650 list_for_each_entry(tmp
, &subsys
->ctrls
, subsys_entry
) {
2651 if (nvme_state_terminal(tmp
))
2654 if (tmp
->cntlid
== ctrl
->cntlid
) {
2655 dev_err(ctrl
->device
,
2656 "Duplicate cntlid %u with %s, rejecting\n",
2657 ctrl
->cntlid
, dev_name(tmp
->device
));
2661 if ((id
->cmic
& NVME_CTRL_CMIC_MULTI_CTRL
) ||
2662 nvme_discovery_ctrl(ctrl
))
2665 dev_err(ctrl
->device
,
2666 "Subsystem does not support multiple controllers\n");
2673 static int nvme_init_subsystem(struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
2675 struct nvme_subsystem
*subsys
, *found
;
2678 subsys
= kzalloc(sizeof(*subsys
), GFP_KERNEL
);
2682 subsys
->instance
= -1;
2683 mutex_init(&subsys
->lock
);
2684 kref_init(&subsys
->ref
);
2685 INIT_LIST_HEAD(&subsys
->ctrls
);
2686 INIT_LIST_HEAD(&subsys
->nsheads
);
2687 nvme_init_subnqn(subsys
, ctrl
, id
);
2688 memcpy(subsys
->serial
, id
->sn
, sizeof(subsys
->serial
));
2689 memcpy(subsys
->model
, id
->mn
, sizeof(subsys
->model
));
2690 memcpy(subsys
->firmware_rev
, id
->fr
, sizeof(subsys
->firmware_rev
));
2691 subsys
->vendor_id
= le16_to_cpu(id
->vid
);
2692 subsys
->cmic
= id
->cmic
;
2693 subsys
->awupf
= le16_to_cpu(id
->awupf
);
2694 #ifdef CONFIG_NVME_MULTIPATH
2695 subsys
->iopolicy
= NVME_IOPOLICY_NUMA
;
2698 subsys
->dev
.class = nvme_subsys_class
;
2699 subsys
->dev
.release
= nvme_release_subsystem
;
2700 subsys
->dev
.groups
= nvme_subsys_attrs_groups
;
2701 dev_set_name(&subsys
->dev
, "nvme-subsys%d", ctrl
->instance
);
2702 device_initialize(&subsys
->dev
);
2704 mutex_lock(&nvme_subsystems_lock
);
2705 found
= __nvme_find_get_subsystem(subsys
->subnqn
);
2707 put_device(&subsys
->dev
);
2710 if (!nvme_validate_cntlid(subsys
, ctrl
, id
)) {
2712 goto out_put_subsystem
;
2715 ret
= device_add(&subsys
->dev
);
2717 dev_err(ctrl
->device
,
2718 "failed to register subsystem device.\n");
2719 put_device(&subsys
->dev
);
2722 ida_init(&subsys
->ns_ida
);
2723 list_add_tail(&subsys
->entry
, &nvme_subsystems
);
2726 ret
= sysfs_create_link(&subsys
->dev
.kobj
, &ctrl
->device
->kobj
,
2727 dev_name(ctrl
->device
));
2729 dev_err(ctrl
->device
,
2730 "failed to create sysfs link from subsystem.\n");
2731 goto out_put_subsystem
;
2735 subsys
->instance
= ctrl
->instance
;
2736 ctrl
->subsys
= subsys
;
2737 list_add_tail(&ctrl
->subsys_entry
, &subsys
->ctrls
);
2738 mutex_unlock(&nvme_subsystems_lock
);
2742 nvme_put_subsystem(subsys
);
2744 mutex_unlock(&nvme_subsystems_lock
);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
                void *log, size_t size, u64 offset)
{
        struct nvme_command c = { };
        u32 dwlen = nvme_bytes_to_numd(size);

        c.get_log_page.opcode = nvme_admin_get_log_page;
        c.get_log_page.nsid = cpu_to_le32(nsid);
        c.get_log_page.lid = log_page;
        c.get_log_page.lsp = lsp;
        c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
        c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
        c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
        c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
        c.get_log_page.csi = csi;

        return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
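/*
 * NUMD is a 0's-based dword count split across NUMDL/NUMDU.  For example
 * (illustrative size), reading 4096 bytes gives nvme_bytes_to_numd() = 1023,
 * so NUMDL = 1023 and NUMDU = 0; LPOL/LPOU carry the 64-bit byte offset for
 * offset-capable log pages.
 */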
2767 static int nvme_get_effects_log(struct nvme_ctrl
*ctrl
, u8 csi
,
2768 struct nvme_effects_log
**log
)
2770 struct nvme_effects_log
*cel
= xa_load(&ctrl
->cels
, csi
);
2776 cel
= kzalloc(sizeof(*cel
), GFP_KERNEL
);
2780 ret
= nvme_get_log(ctrl
, 0x00, NVME_LOG_CMD_EFFECTS
, 0, csi
,
2781 cel
, sizeof(*cel
), 0);
2787 xa_store(&ctrl
->cels
, csi
, cel
, GFP_KERNEL
);
static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{
        u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;

        if (check_shl_overflow(1U, units + page_shift - 9, &val))
                return UINT_MAX;
        return val;
}
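/*
 * Example (illustrative values): with CAP.MPSMIN = 0 (4 KiB pages), an
 * MDTS/WZSL-style value of 5 converts to 1 << (5 + 12 - 9) = 256 sectors,
 * i.e. 128 KiB; an exponent large enough to overflow 32 bits is clamped to
 * UINT_MAX.
 */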
2802 static int nvme_init_non_mdts_limits(struct nvme_ctrl
*ctrl
)
2804 struct nvme_command c
= { };
2805 struct nvme_id_ctrl_nvm
*id
;
2808 if (ctrl
->oncs
& NVME_CTRL_ONCS_DSM
) {
2809 ctrl
->max_discard_sectors
= UINT_MAX
;
2810 ctrl
->max_discard_segments
= NVME_DSM_MAX_RANGES
;
2812 ctrl
->max_discard_sectors
= 0;
2813 ctrl
->max_discard_segments
= 0;
2817 * Even though NVMe spec explicitly states that MDTS is not applicable
2818 * to the write-zeroes, we are cautious and limit the size to the
2819 * controllers max_hw_sectors value, which is based on the MDTS field
2820 * and possibly other limiting factors.
2822 if ((ctrl
->oncs
& NVME_CTRL_ONCS_WRITE_ZEROES
) &&
2823 !(ctrl
->quirks
& NVME_QUIRK_DISABLE_WRITE_ZEROES
))
2824 ctrl
->max_zeroes_sectors
= ctrl
->max_hw_sectors
;
2826 ctrl
->max_zeroes_sectors
= 0;
2828 if (nvme_ctrl_limited_cns(ctrl
))
2831 id
= kzalloc(sizeof(*id
), GFP_KERNEL
);
2835 c
.identify
.opcode
= nvme_admin_identify
;
2836 c
.identify
.cns
= NVME_ID_CNS_CS_CTRL
;
2837 c
.identify
.csi
= NVME_CSI_NVM
;
2839 ret
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, id
, sizeof(*id
));
2844 ctrl
->max_discard_segments
= id
->dmrl
;
2846 ctrl
->max_discard_sectors
= le32_to_cpu(id
->dmrsl
);
2848 ctrl
->max_zeroes_sectors
= nvme_mps_to_sectors(ctrl
, id
->wzsl
);
2855 static int nvme_init_identify(struct nvme_ctrl
*ctrl
)
2857 struct nvme_id_ctrl
*id
;
2859 bool prev_apst_enabled
;
2862 ret
= nvme_identify_ctrl(ctrl
, &id
);
2864 dev_err(ctrl
->device
, "Identify Controller failed (%d)\n", ret
);
2868 if (id
->lpa
& NVME_CTRL_LPA_CMD_EFFECTS_LOG
) {
2869 ret
= nvme_get_effects_log(ctrl
, NVME_CSI_NVM
, &ctrl
->effects
);
2874 if (!(ctrl
->ops
->flags
& NVME_F_FABRICS
))
2875 ctrl
->cntlid
= le16_to_cpu(id
->cntlid
);
2877 if (!ctrl
->identified
) {
2880 ret
= nvme_init_subsystem(ctrl
, id
);
2885 * Check for quirks. Quirk can depend on firmware version,
2886 * so, in principle, the set of quirks present can change
2887 * across a reset. As a possible future enhancement, we
2888 * could re-scan for quirks every time we reinitialize
2889 * the device, but we'd have to make sure that the driver
2890 * behaves intelligently if the quirks change.
2892 for (i
= 0; i
< ARRAY_SIZE(core_quirks
); i
++) {
2893 if (quirk_matches(id
, &core_quirks
[i
]))
2894 ctrl
->quirks
|= core_quirks
[i
].quirks
;
2898 if (force_apst
&& (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
)) {
2899 dev_warn(ctrl
->device
, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2900 ctrl
->quirks
&= ~NVME_QUIRK_NO_DEEPEST_PS
;
2903 ctrl
->crdt
[0] = le16_to_cpu(id
->crdt1
);
2904 ctrl
->crdt
[1] = le16_to_cpu(id
->crdt2
);
2905 ctrl
->crdt
[2] = le16_to_cpu(id
->crdt3
);
2907 ctrl
->oacs
= le16_to_cpu(id
->oacs
);
2908 ctrl
->oncs
= le16_to_cpu(id
->oncs
);
2909 ctrl
->mtfa
= le16_to_cpu(id
->mtfa
);
2910 ctrl
->oaes
= le32_to_cpu(id
->oaes
);
2911 ctrl
->wctemp
= le16_to_cpu(id
->wctemp
);
2912 ctrl
->cctemp
= le16_to_cpu(id
->cctemp
);
2914 atomic_set(&ctrl
->abort_limit
, id
->acl
+ 1);
2915 ctrl
->vwc
= id
->vwc
;
2917 max_hw_sectors
= nvme_mps_to_sectors(ctrl
, id
->mdts
);
2919 max_hw_sectors
= UINT_MAX
;
2920 ctrl
->max_hw_sectors
=
2921 min_not_zero(ctrl
->max_hw_sectors
, max_hw_sectors
);
2923 nvme_set_queue_limits(ctrl
, ctrl
->admin_q
);
2924 ctrl
->sgls
= le32_to_cpu(id
->sgls
);
2925 ctrl
->kas
= le16_to_cpu(id
->kas
);
2926 ctrl
->max_namespaces
= le32_to_cpu(id
->mnan
);
2927 ctrl
->ctratt
= le32_to_cpu(id
->ctratt
);
2931 u32 transition_time
= le32_to_cpu(id
->rtd3e
) / USEC_PER_SEC
;
2933 ctrl
->shutdown_timeout
= clamp_t(unsigned int, transition_time
,
2934 shutdown_timeout
, 60);
2936 if (ctrl
->shutdown_timeout
!= shutdown_timeout
)
2937 dev_info(ctrl
->device
,
2938 "Shutdown timeout set to %u seconds\n",
2939 ctrl
->shutdown_timeout
);
2941 ctrl
->shutdown_timeout
= shutdown_timeout
;
2943 ctrl
->npss
= id
->npss
;
2944 ctrl
->apsta
= id
->apsta
;
2945 prev_apst_enabled
= ctrl
->apst_enabled
;
2946 if (ctrl
->quirks
& NVME_QUIRK_NO_APST
) {
2947 if (force_apst
&& id
->apsta
) {
2948 dev_warn(ctrl
->device
, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2949 ctrl
->apst_enabled
= true;
2951 ctrl
->apst_enabled
= false;
2954 ctrl
->apst_enabled
= id
->apsta
;
2956 memcpy(ctrl
->psd
, id
->psd
, sizeof(ctrl
->psd
));
2958 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
2959 ctrl
->icdoff
= le16_to_cpu(id
->icdoff
);
2960 ctrl
->ioccsz
= le32_to_cpu(id
->ioccsz
);
2961 ctrl
->iorcsz
= le32_to_cpu(id
->iorcsz
);
2962 ctrl
->maxcmd
= le16_to_cpu(id
->maxcmd
);
2965 * In fabrics we need to verify the cntlid matches the
2968 if (ctrl
->cntlid
!= le16_to_cpu(id
->cntlid
)) {
2969 dev_err(ctrl
->device
,
2970 "Mismatching cntlid: Connect %u vs Identify "
2972 ctrl
->cntlid
, le16_to_cpu(id
->cntlid
));
2977 if (!nvme_discovery_ctrl(ctrl
) && !ctrl
->kas
) {
2978 dev_err(ctrl
->device
,
2979 "keep-alive support is mandatory for fabrics\n");
2984 ctrl
->hmpre
= le32_to_cpu(id
->hmpre
);
2985 ctrl
->hmmin
= le32_to_cpu(id
->hmmin
);
2986 ctrl
->hmminds
= le32_to_cpu(id
->hmminds
);
2987 ctrl
->hmmaxd
= le16_to_cpu(id
->hmmaxd
);
2990 ret
= nvme_mpath_init_identify(ctrl
, id
);
2994 if (ctrl
->apst_enabled
&& !prev_apst_enabled
)
2995 dev_pm_qos_expose_latency_tolerance(ctrl
->device
);
2996 else if (!ctrl
->apst_enabled
&& prev_apst_enabled
)
2997 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
3005 * Initialize the cached copies of the Identify data and various controller
3006 * register in our nvme_ctrl structure. This should be called as soon as
3007 * the admin queue is fully up and running.
3009 int nvme_init_ctrl_finish(struct nvme_ctrl
*ctrl
)
3013 ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_VS
, &ctrl
->vs
);
3015 dev_err(ctrl
->device
, "Reading VS failed (%d)\n", ret
);
3019 ctrl
->sqsize
= min_t(u16
, NVME_CAP_MQES(ctrl
->cap
), ctrl
->sqsize
);
3021 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
3022 ctrl
->subsystem
= NVME_CAP_NSSRC(ctrl
->cap
);
3024 ret
= nvme_init_identify(ctrl
);
3028 ret
= nvme_init_non_mdts_limits(ctrl
);
3032 ret
= nvme_configure_apst(ctrl
);
3036 ret
= nvme_configure_timestamp(ctrl
);
3040 ret
= nvme_configure_directives(ctrl
);
3044 ret
= nvme_configure_acre(ctrl
);
3048 if (!ctrl
->identified
&& !nvme_discovery_ctrl(ctrl
)) {
3049 ret
= nvme_hwmon_init(ctrl
);
3054 ctrl
->identified
= true;
3058 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish
);
3060 static int nvme_dev_open(struct inode
*inode
, struct file
*file
)
3062 struct nvme_ctrl
*ctrl
=
3063 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3065 switch (ctrl
->state
) {
3066 case NVME_CTRL_LIVE
:
3069 return -EWOULDBLOCK
;
3072 nvme_get_ctrl(ctrl
);
3073 if (!try_module_get(ctrl
->ops
->module
)) {
3074 nvme_put_ctrl(ctrl
);
3078 file
->private_data
= ctrl
;
3082 static int nvme_dev_release(struct inode
*inode
, struct file
*file
)
3084 struct nvme_ctrl
*ctrl
=
3085 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3087 module_put(ctrl
->ops
->module
);
3088 nvme_put_ctrl(ctrl
);
3092 static const struct file_operations nvme_dev_fops
= {
3093 .owner
= THIS_MODULE
,
3094 .open
= nvme_dev_open
,
3095 .release
= nvme_dev_release
,
3096 .unlocked_ioctl
= nvme_dev_ioctl
,
3097 .compat_ioctl
= compat_ptr_ioctl
,
3100 static ssize_t
nvme_sysfs_reset(struct device
*dev
,
3101 struct device_attribute
*attr
, const char *buf
,
3104 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3107 ret
= nvme_reset_ctrl_sync(ctrl
);
3112 static DEVICE_ATTR(reset_controller
, S_IWUSR
, NULL
, nvme_sysfs_reset
);
3114 static ssize_t
nvme_sysfs_rescan(struct device
*dev
,
3115 struct device_attribute
*attr
, const char *buf
,
3118 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3120 nvme_queue_scan(ctrl
);
3123 static DEVICE_ATTR(rescan_controller
, S_IWUSR
, NULL
, nvme_sysfs_rescan
);
3125 static inline struct nvme_ns_head
*dev_to_ns_head(struct device
*dev
)
3127 struct gendisk
*disk
= dev_to_disk(dev
);
3129 if (disk
->fops
== &nvme_bdev_ops
)
3130 return nvme_get_ns_from_dev(dev
)->head
;
3132 return disk
->private_data
;
3135 static ssize_t
wwid_show(struct device
*dev
, struct device_attribute
*attr
,
3138 struct nvme_ns_head
*head
= dev_to_ns_head(dev
);
3139 struct nvme_ns_ids
*ids
= &head
->ids
;
3140 struct nvme_subsystem
*subsys
= head
->subsys
;
3141 int serial_len
= sizeof(subsys
->serial
);
3142 int model_len
= sizeof(subsys
->model
);
3144 if (!uuid_is_null(&ids
->uuid
))
3145 return sysfs_emit(buf
, "uuid.%pU\n", &ids
->uuid
);
3147 if (memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3148 return sysfs_emit(buf
, "eui.%16phN\n", ids
->nguid
);
3150 if (memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
3151 return sysfs_emit(buf
, "eui.%8phN\n", ids
->eui64
);
3153 while (serial_len
> 0 && (subsys
->serial
[serial_len
- 1] == ' ' ||
3154 subsys
->serial
[serial_len
- 1] == '\0'))
3156 while (model_len
> 0 && (subsys
->model
[model_len
- 1] == ' ' ||
3157 subsys
->model
[model_len
- 1] == '\0'))
3160 return sysfs_emit(buf
, "nvme.%04x-%*phN-%*phN-%08x\n", subsys
->vendor_id
,
3161 serial_len
, subsys
->serial
, model_len
, subsys
->model
,
3164 static DEVICE_ATTR_RO(wwid
);
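/*
 * The wwid attribute above prefers identifiers in this order: "uuid.<UUID>",
 * "eui.<NGUID>" (16 hex bytes), "eui.<EUI-64>" (8 hex bytes), and finally a
 * fallback of the form "nvme.<vendor id>-<serial bytes>-<model bytes>-<nsid>"
 * built from the subsystem's vendor ID, trimmed serial and model strings, and
 * the namespace ID.
 */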
3166 static ssize_t
nguid_show(struct device
*dev
, struct device_attribute
*attr
,
3169 return sysfs_emit(buf
, "%pU\n", dev_to_ns_head(dev
)->ids
.nguid
);
3171 static DEVICE_ATTR_RO(nguid
);
3173 static ssize_t
uuid_show(struct device
*dev
, struct device_attribute
*attr
,
3176 struct nvme_ns_ids
*ids
= &dev_to_ns_head(dev
)->ids
;
3178 /* For backward compatibility expose the NGUID to userspace if
3179 * we have no UUID set
3181 if (uuid_is_null(&ids
->uuid
)) {
3182 printk_ratelimited(KERN_WARNING
3183 "No UUID available providing old NGUID\n");
3184 return sysfs_emit(buf
, "%pU\n", ids
->nguid
);
3186 return sysfs_emit(buf
, "%pU\n", &ids
->uuid
);
3188 static DEVICE_ATTR_RO(uuid
);
3190 static ssize_t
eui_show(struct device
*dev
, struct device_attribute
*attr
,
3193 return sysfs_emit(buf
, "%8ph\n", dev_to_ns_head(dev
)->ids
.eui64
);
3195 static DEVICE_ATTR_RO(eui
);
3197 static ssize_t
nsid_show(struct device
*dev
, struct device_attribute
*attr
,
3200 return sysfs_emit(buf
, "%d\n", dev_to_ns_head(dev
)->ns_id
);
3202 static DEVICE_ATTR_RO(nsid
);
3204 static struct attribute
*nvme_ns_id_attrs
[] = {
3205 &dev_attr_wwid
.attr
,
3206 &dev_attr_uuid
.attr
,
3207 &dev_attr_nguid
.attr
,
3209 &dev_attr_nsid
.attr
,
3210 #ifdef CONFIG_NVME_MULTIPATH
3211 &dev_attr_ana_grpid
.attr
,
3212 &dev_attr_ana_state
.attr
,
3217 static umode_t
nvme_ns_id_attrs_are_visible(struct kobject
*kobj
,
3218 struct attribute
*a
, int n
)
3220 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3221 struct nvme_ns_ids
*ids
= &dev_to_ns_head(dev
)->ids
;
3223 if (a
== &dev_attr_uuid
.attr
) {
3224 if (uuid_is_null(&ids
->uuid
) &&
3225 !memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3228 if (a
== &dev_attr_nguid
.attr
) {
3229 if (!memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
3232 if (a
== &dev_attr_eui
.attr
) {
3233 if (!memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
3236 #ifdef CONFIG_NVME_MULTIPATH
3237 if (a
== &dev_attr_ana_grpid
.attr
|| a
== &dev_attr_ana_state
.attr
) {
3238 if (dev_to_disk(dev
)->fops
!= &nvme_bdev_ops
) /* per-path attr */
3240 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev
)->ctrl
))
3247 static const struct attribute_group nvme_ns_id_attr_group
= {
3248 .attrs
= nvme_ns_id_attrs
,
3249 .is_visible
= nvme_ns_id_attrs_are_visible
,
3252 const struct attribute_group
*nvme_ns_id_attr_groups
[] = {
3253 &nvme_ns_id_attr_group
,
3257 #define nvme_show_str_function(field) \
3258 static ssize_t field##_show(struct device *dev, \
3259 struct device_attribute *attr, char *buf) \
3261 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3262 return sysfs_emit(buf, "%.*s\n", \
3263 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3265 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3267 nvme_show_str_function(model
);
3268 nvme_show_str_function(serial
);
3269 nvme_show_str_function(firmware_rev
);
3271 #define nvme_show_int_function(field) \
3272 static ssize_t field##_show(struct device *dev, \
3273 struct device_attribute *attr, char *buf) \
3275 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3276 return sysfs_emit(buf, "%d\n", ctrl->field); \
3278 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3280 nvme_show_int_function(cntlid
);
3281 nvme_show_int_function(numa_node
);
3282 nvme_show_int_function(queue_count
);
3283 nvme_show_int_function(sqsize
);
3284 nvme_show_int_function(kato
);
3286 static ssize_t
nvme_sysfs_delete(struct device
*dev
,
3287 struct device_attribute
*attr
, const char *buf
,
3290 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3292 if (device_remove_file_self(dev
, attr
))
3293 nvme_delete_ctrl_sync(ctrl
);
3296 static DEVICE_ATTR(delete_controller
, S_IWUSR
, NULL
, nvme_sysfs_delete
);
3298 static ssize_t
nvme_sysfs_show_transport(struct device
*dev
,
3299 struct device_attribute
*attr
,
3302 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3304 return sysfs_emit(buf
, "%s\n", ctrl
->ops
->name
);
3306 static DEVICE_ATTR(transport
, S_IRUGO
, nvme_sysfs_show_transport
, NULL
);
3308 static ssize_t
nvme_sysfs_show_state(struct device
*dev
,
3309 struct device_attribute
*attr
,
3312 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3313 static const char *const state_name
[] = {
3314 [NVME_CTRL_NEW
] = "new",
3315 [NVME_CTRL_LIVE
] = "live",
3316 [NVME_CTRL_RESETTING
] = "resetting",
3317 [NVME_CTRL_CONNECTING
] = "connecting",
3318 [NVME_CTRL_DELETING
] = "deleting",
3319 [NVME_CTRL_DELETING_NOIO
]= "deleting (no IO)",
3320 [NVME_CTRL_DEAD
] = "dead",
3323 if ((unsigned)ctrl
->state
< ARRAY_SIZE(state_name
) &&
3324 state_name
[ctrl
->state
])
3325 return sysfs_emit(buf
, "%s\n", state_name
[ctrl
->state
]);
3327 return sysfs_emit(buf
, "unknown state\n");
3330 static DEVICE_ATTR(state
, S_IRUGO
, nvme_sysfs_show_state
, NULL
);
3332 static ssize_t
nvme_sysfs_show_subsysnqn(struct device
*dev
,
3333 struct device_attribute
*attr
,
3336 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3338 return sysfs_emit(buf
, "%s\n", ctrl
->subsys
->subnqn
);
3340 static DEVICE_ATTR(subsysnqn
, S_IRUGO
, nvme_sysfs_show_subsysnqn
, NULL
);
3342 static ssize_t
nvme_sysfs_show_hostnqn(struct device
*dev
,
3343 struct device_attribute
*attr
,
3346 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3348 return sysfs_emit(buf
, "%s\n", ctrl
->opts
->host
->nqn
);
3350 static DEVICE_ATTR(hostnqn
, S_IRUGO
, nvme_sysfs_show_hostnqn
, NULL
);
3352 static ssize_t
nvme_sysfs_show_hostid(struct device
*dev
,
3353 struct device_attribute
*attr
,
3356 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3358 return sysfs_emit(buf
, "%pU\n", &ctrl
->opts
->host
->id
);
3360 static DEVICE_ATTR(hostid
, S_IRUGO
, nvme_sysfs_show_hostid
, NULL
);
3362 static ssize_t
nvme_sysfs_show_address(struct device
*dev
,
3363 struct device_attribute
*attr
,
3366 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3368 return ctrl
->ops
->get_address(ctrl
, buf
, PAGE_SIZE
);
3370 static DEVICE_ATTR(address
, S_IRUGO
, nvme_sysfs_show_address
, NULL
);
3372 static ssize_t
nvme_ctrl_loss_tmo_show(struct device
*dev
,
3373 struct device_attribute
*attr
, char *buf
)
3375 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3376 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3378 if (ctrl
->opts
->max_reconnects
== -1)
3379 return sysfs_emit(buf
, "off\n");
3380 return sysfs_emit(buf
, "%d\n",
3381 opts
->max_reconnects
* opts
->reconnect_delay
);
3384 static ssize_t
nvme_ctrl_loss_tmo_store(struct device
*dev
,
3385 struct device_attribute
*attr
, const char *buf
, size_t count
)
3387 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3388 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3389 int ctrl_loss_tmo
, err
;
3391 err
= kstrtoint(buf
, 10, &ctrl_loss_tmo
);
3395 if (ctrl_loss_tmo
< 0)
3396 opts
->max_reconnects
= -1;
3398 opts
->max_reconnects
= DIV_ROUND_UP(ctrl_loss_tmo
,
3399 opts
->reconnect_delay
);
3402 static DEVICE_ATTR(ctrl_loss_tmo
, S_IRUGO
| S_IWUSR
,
3403 nvme_ctrl_loss_tmo_show
, nvme_ctrl_loss_tmo_store
);
3405 static ssize_t
nvme_ctrl_reconnect_delay_show(struct device
*dev
,
3406 struct device_attribute
*attr
, char *buf
)
3408 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3410 if (ctrl
->opts
->reconnect_delay
== -1)
3411 return sysfs_emit(buf
, "off\n");
3412 return sysfs_emit(buf
, "%d\n", ctrl
->opts
->reconnect_delay
);
3415 static ssize_t
nvme_ctrl_reconnect_delay_store(struct device
*dev
,
3416 struct device_attribute
*attr
, const char *buf
, size_t count
)
3418 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3422 err
= kstrtou32(buf
, 10, &v
);
3426 ctrl
->opts
->reconnect_delay
= v
;
3429 static DEVICE_ATTR(reconnect_delay
, S_IRUGO
| S_IWUSR
,
3430 nvme_ctrl_reconnect_delay_show
, nvme_ctrl_reconnect_delay_store
);
3432 static ssize_t
nvme_ctrl_fast_io_fail_tmo_show(struct device
*dev
,
3433 struct device_attribute
*attr
, char *buf
)
3435 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3437 if (ctrl
->opts
->fast_io_fail_tmo
== -1)
3438 return sysfs_emit(buf
, "off\n");
3439 return sysfs_emit(buf
, "%d\n", ctrl
->opts
->fast_io_fail_tmo
);
3442 static ssize_t
nvme_ctrl_fast_io_fail_tmo_store(struct device
*dev
,
3443 struct device_attribute
*attr
, const char *buf
, size_t count
)
3445 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3446 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3447 int fast_io_fail_tmo
, err
;
3449 err
= kstrtoint(buf
, 10, &fast_io_fail_tmo
);
3453 if (fast_io_fail_tmo
< 0)
3454 opts
->fast_io_fail_tmo
= -1;
3456 opts
->fast_io_fail_tmo
= fast_io_fail_tmo
;
3459 static DEVICE_ATTR(fast_io_fail_tmo
, S_IRUGO
| S_IWUSR
,
3460 nvme_ctrl_fast_io_fail_tmo_show
, nvme_ctrl_fast_io_fail_tmo_store
);
3462 static struct attribute
*nvme_dev_attrs
[] = {
3463 &dev_attr_reset_controller
.attr
,
3464 &dev_attr_rescan_controller
.attr
,
3465 &dev_attr_model
.attr
,
3466 &dev_attr_serial
.attr
,
3467 &dev_attr_firmware_rev
.attr
,
3468 &dev_attr_cntlid
.attr
,
3469 &dev_attr_delete_controller
.attr
,
3470 &dev_attr_transport
.attr
,
3471 &dev_attr_subsysnqn
.attr
,
3472 &dev_attr_address
.attr
,
3473 &dev_attr_state
.attr
,
3474 &dev_attr_numa_node
.attr
,
3475 &dev_attr_queue_count
.attr
,
3476 &dev_attr_sqsize
.attr
,
3477 &dev_attr_hostnqn
.attr
,
3478 &dev_attr_hostid
.attr
,
3479 &dev_attr_ctrl_loss_tmo
.attr
,
3480 &dev_attr_reconnect_delay
.attr
,
3481 &dev_attr_fast_io_fail_tmo
.attr
,
3482 &dev_attr_kato
.attr
,
3486 static umode_t
nvme_dev_attrs_are_visible(struct kobject
*kobj
,
3487 struct attribute
*a
, int n
)
3489 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3490 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
3492 if (a
== &dev_attr_delete_controller
.attr
&& !ctrl
->ops
->delete_ctrl
)
3494 if (a
== &dev_attr_address
.attr
&& !ctrl
->ops
->get_address
)
3496 if (a
== &dev_attr_hostnqn
.attr
&& !ctrl
->opts
)
3498 if (a
== &dev_attr_hostid
.attr
&& !ctrl
->opts
)
3500 if (a
== &dev_attr_ctrl_loss_tmo
.attr
&& !ctrl
->opts
)
3502 if (a
== &dev_attr_reconnect_delay
.attr
&& !ctrl
->opts
)
3504 if (a
== &dev_attr_fast_io_fail_tmo
.attr
&& !ctrl
->opts
)
3510 static const struct attribute_group nvme_dev_attrs_group
= {
3511 .attrs
= nvme_dev_attrs
,
3512 .is_visible
= nvme_dev_attrs_are_visible
,
3515 static const struct attribute_group
*nvme_dev_attr_groups
[] = {
3516 &nvme_dev_attrs_group
,
3520 static struct nvme_ns_head
*nvme_find_ns_head(struct nvme_subsystem
*subsys
,
3523 struct nvme_ns_head
*h
;
3525 lockdep_assert_held(&subsys
->lock
);
3527 list_for_each_entry(h
, &subsys
->nsheads
, entry
) {
3528 if (h
->ns_id
!= nsid
)
3530 if (!list_empty(&h
->list
) && nvme_tryget_ns_head(h
))
3537 static int __nvme_check_ids(struct nvme_subsystem
*subsys
,
3538 struct nvme_ns_head
*new)
3540 struct nvme_ns_head
*h
;
3542 lockdep_assert_held(&subsys
->lock
);
3544 list_for_each_entry(h
, &subsys
->nsheads
, entry
) {
3545 if (nvme_ns_ids_valid(&new->ids
) &&
3546 nvme_ns_ids_equal(&new->ids
, &h
->ids
))
3553 static void nvme_cdev_rel(struct device
*dev
)
3555 ida_simple_remove(&nvme_ns_chr_minor_ida
, MINOR(dev
->devt
));
3558 void nvme_cdev_del(struct cdev
*cdev
, struct device
*cdev_device
)
3560 cdev_device_del(cdev
, cdev_device
);
3561 put_device(cdev_device
);
3564 int nvme_cdev_add(struct cdev
*cdev
, struct device
*cdev_device
,
3565 const struct file_operations
*fops
, struct module
*owner
)
3569 minor
= ida_simple_get(&nvme_ns_chr_minor_ida
, 0, 0, GFP_KERNEL
);
3572 cdev_device
->devt
= MKDEV(MAJOR(nvme_ns_chr_devt
), minor
);
3573 cdev_device
->class = nvme_ns_chr_class
;
3574 cdev_device
->release
= nvme_cdev_rel
;
3575 device_initialize(cdev_device
);
3576 cdev_init(cdev
, fops
);
3577 cdev
->owner
= owner
;
3578 ret
= cdev_device_add(cdev
, cdev_device
);
3580 put_device(cdev_device
);
3585 static int nvme_ns_chr_open(struct inode
*inode
, struct file
*file
)
3587 return nvme_ns_open(container_of(inode
->i_cdev
, struct nvme_ns
, cdev
));
3590 static int nvme_ns_chr_release(struct inode
*inode
, struct file
*file
)
3592 nvme_ns_release(container_of(inode
->i_cdev
, struct nvme_ns
, cdev
));
3596 static const struct file_operations nvme_ns_chr_fops
= {
3597 .owner
= THIS_MODULE
,
3598 .open
= nvme_ns_chr_open
,
3599 .release
= nvme_ns_chr_release
,
3600 .unlocked_ioctl
= nvme_ns_chr_ioctl
,
3601 .compat_ioctl
= compat_ptr_ioctl
,
3604 static int nvme_add_ns_cdev(struct nvme_ns
*ns
)
3608 ns
->cdev_device
.parent
= ns
->ctrl
->device
;
3609 ret
= dev_set_name(&ns
->cdev_device
, "ng%dn%d",
3610 ns
->ctrl
->instance
, ns
->head
->instance
);
3614 return nvme_cdev_add(&ns
->cdev
, &ns
->cdev_device
, &nvme_ns_chr_fops
,
3615 ns
->ctrl
->ops
->module
);
3618 static struct nvme_ns_head
*nvme_alloc_ns_head(struct nvme_ctrl
*ctrl
,
3619 unsigned nsid
, struct nvme_ns_ids
*ids
)
3621 struct nvme_ns_head
*head
;
3622 size_t size
= sizeof(*head
);
3625 #ifdef CONFIG_NVME_MULTIPATH
3626 size
+= num_possible_nodes() * sizeof(struct nvme_ns
*);
3629 head
= kzalloc(size
, GFP_KERNEL
);
3632 ret
= ida_simple_get(&ctrl
->subsys
->ns_ida
, 1, 0, GFP_KERNEL
);
3635 head
->instance
= ret
;
3636 INIT_LIST_HEAD(&head
->list
);
3637 ret
= init_srcu_struct(&head
->srcu
);
3639 goto out_ida_remove
;
3640 head
->subsys
= ctrl
->subsys
;
3643 kref_init(&head
->ref
);
3645 ret
= __nvme_check_ids(ctrl
->subsys
, head
);
3647 dev_err(ctrl
->device
,
3648 "duplicate IDs for nsid %d\n", nsid
);
3649 goto out_cleanup_srcu
;
3652 if (head
->ids
.csi
) {
3653 ret
= nvme_get_effects_log(ctrl
, head
->ids
.csi
, &head
->effects
);
3655 goto out_cleanup_srcu
;
3657 head
->effects
= ctrl
->effects
;
3659 ret
= nvme_mpath_alloc_disk(ctrl
, head
);
3661 goto out_cleanup_srcu
;
3663 list_add_tail(&head
->entry
, &ctrl
->subsys
->nsheads
);
3665 kref_get(&ctrl
->subsys
->ref
);
3669 cleanup_srcu_struct(&head
->srcu
);
3671 ida_simple_remove(&ctrl
->subsys
->ns_ida
, head
->instance
);
3676 ret
= blk_status_to_errno(nvme_error_status(ret
));
3677 return ERR_PTR(ret
);
3680 static int nvme_init_ns_head(struct nvme_ns
*ns
, unsigned nsid
,
3681 struct nvme_ns_ids
*ids
, bool is_shared
)
3683 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
3684 struct nvme_ns_head
*head
= NULL
;
3687 mutex_lock(&ctrl
->subsys
->lock
);
3688 head
= nvme_find_ns_head(ctrl
->subsys
, nsid
);
3690 head
= nvme_alloc_ns_head(ctrl
, nsid
, ids
);
3692 ret
= PTR_ERR(head
);
3695 head
->shared
= is_shared
;
3698 if (!is_shared
|| !head
->shared
) {
3699 dev_err(ctrl
->device
,
3700 "Duplicate unshared namespace %d\n", nsid
);
3701 goto out_put_ns_head
;
3703 if (!nvme_ns_ids_equal(&head
->ids
, ids
)) {
3704 dev_err(ctrl
->device
,
3705 "IDs don't match for shared namespace %d\n",
3707 goto out_put_ns_head
;
3711 list_add_tail_rcu(&ns
->siblings
, &head
->list
);
3713 mutex_unlock(&ctrl
->subsys
->lock
);
3717 nvme_put_ns_head(head
);
3719 mutex_unlock(&ctrl
->subsys
->lock
);
3723 struct nvme_ns
*nvme_find_get_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
3725 struct nvme_ns
*ns
, *ret
= NULL
;
3727 down_read(&ctrl
->namespaces_rwsem
);
3728 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
3729 if (ns
->head
->ns_id
== nsid
) {
3730 if (!nvme_get_ns(ns
))
3735 if (ns
->head
->ns_id
> nsid
)
3738 up_read(&ctrl
->namespaces_rwsem
);
3741 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns
, NVME_TARGET_PASSTHRU
);
3744 * Add the namespace to the controller list while keeping the list ordered.
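/*
 * The reverse walk below keeps ctrl->namespaces sorted by namespace ID
 * without a full sort: e.g. (illustrative IDs) inserting nsid 3 into a list
 * holding 1, 2 and 5 stops at the entry for 2 and links the new namespace
 * right after it; if no smaller nsid exists the namespace goes to the front.
 */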
3746 static void nvme_ns_add_to_ctrl_list(struct nvme_ns
*ns
)
3748 struct nvme_ns
*tmp
;
3750 list_for_each_entry_reverse(tmp
, &ns
->ctrl
->namespaces
, list
) {
3751 if (tmp
->head
->ns_id
< ns
->head
->ns_id
) {
3752 list_add(&ns
->list
, &tmp
->list
);
3756 list_add(&ns
->list
, &ns
->ctrl
->namespaces
);
3759 static void nvme_alloc_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
,
3760 struct nvme_ns_ids
*ids
)
3763 struct gendisk
*disk
;
3764 struct nvme_id_ns
*id
;
3765 int node
= ctrl
->numa_node
;
3767 if (nvme_identify_ns(ctrl
, nsid
, ids
, &id
))
3770 ns
= kzalloc_node(sizeof(*ns
), GFP_KERNEL
, node
);
3774 disk
= blk_mq_alloc_disk(ctrl
->tagset
, ns
);
3777 disk
->fops
= &nvme_bdev_ops
;
3778 disk
->private_data
= ns
;
3781 ns
->queue
= disk
->queue
;
3783 if (ctrl
->opts
&& ctrl
->opts
->data_digest
)
3784 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES
, ns
->queue
);
3786 blk_queue_flag_set(QUEUE_FLAG_NONROT
, ns
->queue
);
3787 if (ctrl
->ops
->flags
& NVME_F_PCI_P2PDMA
)
3788 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA
, ns
->queue
);
3791 kref_init(&ns
->kref
);
3793 if (nvme_init_ns_head(ns
, nsid
, ids
, id
->nmic
& NVME_NS_NMIC_SHARED
))
3794 goto out_cleanup_disk
;
3797 * Without the multipath code enabled, multiple controller per
3798 * subsystems are visible as devices and thus we cannot use the
3799 * subsystem instance.
3801 if (!nvme_mpath_set_disk_name(ns
, disk
->disk_name
, &disk
->flags
))
3802 sprintf(disk
->disk_name
, "nvme%dn%d", ctrl
->instance
,
3803 ns
->head
->instance
);
3805 if (nvme_update_ns_info(ns
, id
))
3808 down_write(&ctrl
->namespaces_rwsem
);
3809 nvme_ns_add_to_ctrl_list(ns
);
3810 up_write(&ctrl
->namespaces_rwsem
);
3811 nvme_get_ctrl(ctrl
);
3813 if (device_add_disk(ctrl
->device
, ns
->disk
, nvme_ns_id_attr_groups
))
3814 goto out_cleanup_ns_from_list
;
3816 if (!nvme_ns_head_multipath(ns
->head
))
3817 nvme_add_ns_cdev(ns
);
3819 nvme_mpath_add_disk(ns
, id
);
3820 nvme_fault_inject_init(&ns
->fault_inject
, ns
->disk
->disk_name
);
3825 out_cleanup_ns_from_list
:
3826 nvme_put_ctrl(ctrl
);
3827 down_write(&ctrl
->namespaces_rwsem
);
3828 list_del_init(&ns
->list
);
3829 up_write(&ctrl
->namespaces_rwsem
);
3831 mutex_lock(&ctrl
->subsys
->lock
);
3832 list_del_rcu(&ns
->siblings
);
3833 if (list_empty(&ns
->head
->list
))
3834 list_del_init(&ns
->head
->entry
);
3835 mutex_unlock(&ctrl
->subsys
->lock
);
3836 nvme_put_ns_head(ns
->head
);
3838 blk_cleanup_disk(disk
);
3845 static void nvme_ns_remove(struct nvme_ns
*ns
)
3847 bool last_path
= false;
3849 if (test_and_set_bit(NVME_NS_REMOVING
, &ns
->flags
))
3852 clear_bit(NVME_NS_READY
, &ns
->flags
);
3853 set_capacity(ns
->disk
, 0);
3854 nvme_fault_inject_fini(&ns
->fault_inject
);
3856 mutex_lock(&ns
->ctrl
->subsys
->lock
);
3857 list_del_rcu(&ns
->siblings
);
3858 if (list_empty(&ns
->head
->list
)) {
3859 list_del_init(&ns
->head
->entry
);
3862 mutex_unlock(&ns
->ctrl
->subsys
->lock
);
3864 /* guarantee not available in head->list */
3867 /* wait for concurrent submissions */
3868 if (nvme_mpath_clear_current_path(ns
))
3869 synchronize_srcu(&ns
->head
->srcu
);
3871 if (!nvme_ns_head_multipath(ns
->head
))
3872 nvme_cdev_del(&ns
->cdev
, &ns
->cdev_device
);
3873 del_gendisk(ns
->disk
);
3874 blk_cleanup_queue(ns
->queue
);
3876 down_write(&ns
->ctrl
->namespaces_rwsem
);
3877 list_del_init(&ns
->list
);
3878 up_write(&ns
->ctrl
->namespaces_rwsem
);
3881 nvme_mpath_shutdown_disk(ns
->head
);
3885 static void nvme_ns_remove_by_nsid(struct nvme_ctrl
*ctrl
, u32 nsid
)
3887 struct nvme_ns
*ns
= nvme_find_get_ns(ctrl
, nsid
);
3895 static void nvme_validate_ns(struct nvme_ns
*ns
, struct nvme_ns_ids
*ids
)
3897 struct nvme_id_ns
*id
;
3898 int ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
3900 if (test_bit(NVME_NS_DEAD
, &ns
->flags
))
3903 ret
= nvme_identify_ns(ns
->ctrl
, ns
->head
->ns_id
, ids
, &id
);
3907 ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
3908 if (!nvme_ns_ids_equal(&ns
->head
->ids
, ids
)) {
3909 dev_err(ns
->ctrl
->device
,
3910 "identifiers changed for nsid %d\n", ns
->head
->ns_id
);
3914 ret
= nvme_update_ns_info(ns
, id
);
3920 * Only remove the namespace if we got a fatal error back from the
3921 * device, otherwise ignore the error and just move on.
3923 * TODO: we should probably schedule a delayed retry here.
3925 if (ret
> 0 && (ret
& NVME_SC_DNR
))
3929 static void nvme_validate_or_alloc_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
3931 struct nvme_ns_ids ids
= { };
3934 if (nvme_identify_ns_descs(ctrl
, nsid
, &ids
))
3937 ns
= nvme_find_get_ns(ctrl
, nsid
);
3939 nvme_validate_ns(ns
, &ids
);
3946 nvme_alloc_ns(ctrl
, nsid
, &ids
);
3949 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED
)) {
3950 dev_warn(ctrl
->device
,
3951 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
3955 if (!nvme_multi_css(ctrl
)) {
3956 dev_warn(ctrl
->device
,
3957 "command set not reported for nsid: %d\n",
3961 nvme_alloc_ns(ctrl
, nsid
, &ids
);
3964 dev_warn(ctrl
->device
, "unknown csi %u for nsid %u\n",
3970 static void nvme_remove_invalid_namespaces(struct nvme_ctrl
*ctrl
,
3973 struct nvme_ns
*ns
, *next
;
3976 down_write(&ctrl
->namespaces_rwsem
);
3977 list_for_each_entry_safe(ns
, next
, &ctrl
->namespaces
, list
) {
3978 if (ns
->head
->ns_id
> nsid
|| test_bit(NVME_NS_DEAD
, &ns
->flags
))
3979 list_move_tail(&ns
->list
, &rm_list
);
3981 up_write(&ctrl
->namespaces_rwsem
);
3983 list_for_each_entry_safe(ns
, next
, &rm_list
, list
)
3988 static int nvme_scan_ns_list(struct nvme_ctrl
*ctrl
)
3990 const int nr_entries
= NVME_IDENTIFY_DATA_SIZE
/ sizeof(__le32
);
3995 if (nvme_ctrl_limited_cns(ctrl
))
3998 ns_list
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
4003 struct nvme_command cmd
= {
4004 .identify
.opcode
= nvme_admin_identify
,
4005 .identify
.cns
= NVME_ID_CNS_NS_ACTIVE_LIST
,
4006 .identify
.nsid
= cpu_to_le32(prev
),
4009 ret
= nvme_submit_sync_cmd(ctrl
->admin_q
, &cmd
, ns_list
,
4010 NVME_IDENTIFY_DATA_SIZE
);
4012 dev_warn(ctrl
->device
,
4013 "Identify NS List failed (status=0x%x)\n", ret
);
4017 for (i
= 0; i
< nr_entries
; i
++) {
4018 u32 nsid
= le32_to_cpu(ns_list
[i
]);
4020 if (!nsid
) /* end of the list? */
4022 nvme_validate_or_alloc_ns(ctrl
, nsid
);
4023 while (++prev
< nsid
)
4024 nvme_ns_remove_by_nsid(ctrl
, prev
);
4028 nvme_remove_invalid_namespaces(ctrl
, prev
);
4034 static void nvme_scan_ns_sequential(struct nvme_ctrl
*ctrl
)
4036 struct nvme_id_ctrl
*id
;
4039 if (nvme_identify_ctrl(ctrl
, &id
))
4041 nn
= le32_to_cpu(id
->nn
);
4044 for (i
= 1; i
<= nn
; i
++)
4045 nvme_validate_or_alloc_ns(ctrl
, i
);
4047 nvme_remove_invalid_namespaces(ctrl
, nn
);
4050 static void nvme_clear_changed_ns_log(struct nvme_ctrl
*ctrl
)
4052 size_t log_size
= NVME_MAX_CHANGED_NAMESPACES
* sizeof(__le32
);
4056 log
= kzalloc(log_size
, GFP_KERNEL
);
4061 * We need to read the log to clear the AEN, but we don't want to rely
4062 * on it for the changed namespace information as userspace could have
4063 * raced with us in reading the log page, which could cause us to miss
4066 error
= nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_CHANGED_NS
, 0,
4067 NVME_CSI_NVM
, log
, log_size
, 0);
4069 dev_warn(ctrl
->device
,
4070 "reading changed ns log failed: %d\n", error
);
4075 static void nvme_scan_work(struct work_struct
*work
)
4077 struct nvme_ctrl
*ctrl
=
4078 container_of(work
, struct nvme_ctrl
, scan_work
);
4080 /* No tagset on a live ctrl means IO queues could not created */
4081 if (ctrl
->state
!= NVME_CTRL_LIVE
|| !ctrl
->tagset
)
4084 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED
, &ctrl
->events
)) {
4085 dev_info(ctrl
->device
, "rescanning namespaces.\n");
4086 nvme_clear_changed_ns_log(ctrl
);
4089 mutex_lock(&ctrl
->scan_lock
);
4090 if (nvme_scan_ns_list(ctrl
) != 0)
4091 nvme_scan_ns_sequential(ctrl
);
4092 mutex_unlock(&ctrl
->scan_lock
);
4096 * This function iterates the namespace list unlocked to allow recovery from
4097 * controller failure. It is up to the caller to ensure the namespace list is
4098 * not modified by scan work while this function is executing.
4100 void nvme_remove_namespaces(struct nvme_ctrl
*ctrl
)
4102 struct nvme_ns
*ns
, *next
;
4106 * make sure to requeue I/O to all namespaces as these
4107 * might result from the scan itself and must complete
4108 * for the scan_work to make progress
4110 nvme_mpath_clear_ctrl_paths(ctrl
);
4112 /* prevent racing with ns scanning */
4113 flush_work(&ctrl
->scan_work
);
4116 * The dead states indicates the controller was not gracefully
4117 * disconnected. In that case, we won't be able to flush any data while
4118 * removing the namespaces' disks; fail all the queues now to avoid
4119 * potentially having to clean up the failed sync later.
4121 if (ctrl
->state
== NVME_CTRL_DEAD
)
4122 nvme_kill_queues(ctrl
);
4124 /* this is a no-op when called from the controller reset handler */
4125 nvme_change_ctrl_state(ctrl
, NVME_CTRL_DELETING_NOIO
);
4127 down_write(&ctrl
->namespaces_rwsem
);
4128 list_splice_init(&ctrl
->namespaces
, &ns_list
);
4129 up_write(&ctrl
->namespaces_rwsem
);
4131 list_for_each_entry_safe(ns
, next
, &ns_list
, list
)
4134 EXPORT_SYMBOL_GPL(nvme_remove_namespaces
);
4136 static int nvme_class_uevent(struct device
*dev
, struct kobj_uevent_env
*env
)
4138 struct nvme_ctrl
*ctrl
=
4139 container_of(dev
, struct nvme_ctrl
, ctrl_device
);
4140 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
4143 ret
= add_uevent_var(env
, "NVME_TRTYPE=%s", ctrl
->ops
->name
);
4148 ret
= add_uevent_var(env
, "NVME_TRADDR=%s", opts
->traddr
);
4152 ret
= add_uevent_var(env
, "NVME_TRSVCID=%s",
4153 opts
->trsvcid
?: "none");
4157 ret
= add_uevent_var(env
, "NVME_HOST_TRADDR=%s",
4158 opts
->host_traddr
?: "none");
4162 ret
= add_uevent_var(env
, "NVME_HOST_IFACE=%s",
4163 opts
->host_iface
?: "none");
4168 static void nvme_aen_uevent(struct nvme_ctrl
*ctrl
)
4170 char *envp
[2] = { NULL
, NULL
};
4171 u32 aen_result
= ctrl
->aen_result
;
4173 ctrl
->aen_result
= 0;
4177 envp
[0] = kasprintf(GFP_KERNEL
, "NVME_AEN=%#08x", aen_result
);
4180 kobject_uevent_env(&ctrl
->device
->kobj
, KOBJ_CHANGE
, envp
);
4184 static void nvme_async_event_work(struct work_struct
*work
)
4186 struct nvme_ctrl
*ctrl
=
4187 container_of(work
, struct nvme_ctrl
, async_event_work
);
4189 nvme_aen_uevent(ctrl
);
4192 * The transport drivers must guarantee AER submission here is safe by
4193 * flushing ctrl async_event_work after changing the controller state
4194 * from LIVE and before freeing the admin queue.
4196 if (ctrl
->state
== NVME_CTRL_LIVE
)
4197 ctrl
->ops
->submit_async_event(ctrl
);
4200 static bool nvme_ctrl_pp_status(struct nvme_ctrl
*ctrl
)
4205 if (ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
))
4211 return ((ctrl
->ctrl_config
& NVME_CC_ENABLE
) && (csts
& NVME_CSTS_PP
));
4214 static void nvme_get_fw_slot_info(struct nvme_ctrl
*ctrl
)
4216 struct nvme_fw_slot_info_log
*log
;
4218 log
= kmalloc(sizeof(*log
), GFP_KERNEL
);
4222 if (nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_FW_SLOT
, 0, NVME_CSI_NVM
,
4223 log
, sizeof(*log
), 0))
4224 dev_warn(ctrl
->device
, "Get FW SLOT INFO log error\n");
4228 static void nvme_fw_act_work(struct work_struct
*work
)
4230 struct nvme_ctrl
*ctrl
= container_of(work
,
4231 struct nvme_ctrl
, fw_act_work
);
4232 unsigned long fw_act_timeout
;
4235 fw_act_timeout
= jiffies
+
4236 msecs_to_jiffies(ctrl
->mtfa
* 100);
4238 fw_act_timeout
= jiffies
+
4239 msecs_to_jiffies(admin_timeout
* 1000);
4241 nvme_stop_queues(ctrl
);
4242 while (nvme_ctrl_pp_status(ctrl
)) {
4243 if (time_after(jiffies
, fw_act_timeout
)) {
4244 dev_warn(ctrl
->device
,
4245 "Fw activation timeout, reset controller\n");
4246 nvme_try_sched_reset(ctrl
);
4252 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_LIVE
))
4255 nvme_start_queues(ctrl
);
4256 /* read FW slot information to clear the AER */
4257 nvme_get_fw_slot_info(ctrl
);
4260 static void nvme_handle_aen_notice(struct nvme_ctrl
*ctrl
, u32 result
)
4262 u32 aer_notice_type
= (result
& 0xff00) >> 8;
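        /*
         * The AER result dword packs the event type in bits 2:0 and, for
         * notice events, the notice code in bits 15:8.  For example
         * (illustrative value), a result of 0x00000102 is a notice (type 0x2)
         * reporting firmware activation starting (code 0x01).
         */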
4264 trace_nvme_async_event(ctrl
, aer_notice_type
);
4266 switch (aer_notice_type
) {
4267 case NVME_AER_NOTICE_NS_CHANGED
:
4268 set_bit(NVME_AER_NOTICE_NS_CHANGED
, &ctrl
->events
);
4269 nvme_queue_scan(ctrl
);
4271 case NVME_AER_NOTICE_FW_ACT_STARTING
:
4273 * We are (ab)using the RESETTING state to prevent subsequent
4274 * recovery actions from interfering with the controller's
4275 * firmware activation.
4277 if (nvme_change_ctrl_state(ctrl
, NVME_CTRL_RESETTING
))
4278 queue_work(nvme_wq
, &ctrl
->fw_act_work
);
4280 #ifdef CONFIG_NVME_MULTIPATH
4281 case NVME_AER_NOTICE_ANA
:
4282 if (!ctrl
->ana_log_buf
)
4284 queue_work(nvme_wq
, &ctrl
->ana_work
);
4287 case NVME_AER_NOTICE_DISC_CHANGED
:
4288 ctrl
->aen_result
= result
;
4291 dev_warn(ctrl
->device
, "async event result %08x\n", result
);
4295 void nvme_complete_async_event(struct nvme_ctrl
*ctrl
, __le16 status
,
4296 volatile union nvme_result
*res
)
4298 u32 result
= le32_to_cpu(res
->u32
);
4299 u32 aer_type
= result
& 0x07;
4301 if (le16_to_cpu(status
) >> 1 != NVME_SC_SUCCESS
)
4305 case NVME_AER_NOTICE
:
4306 nvme_handle_aen_notice(ctrl
, result
);
4308 case NVME_AER_ERROR
:
4309 case NVME_AER_SMART
:
4312 trace_nvme_async_event(ctrl
, aer_type
);
4313 ctrl
->aen_result
= result
;
4318 queue_work(nvme_wq
, &ctrl
->async_event_work
);
4320 EXPORT_SYMBOL_GPL(nvme_complete_async_event
);
4322 void nvme_stop_ctrl(struct nvme_ctrl
*ctrl
)
4324 nvme_mpath_stop(ctrl
);
4325 nvme_stop_keep_alive(ctrl
);
4326 nvme_stop_failfast_work(ctrl
);
4327 flush_work(&ctrl
->async_event_work
);
4328 cancel_work_sync(&ctrl
->fw_act_work
);
4330 EXPORT_SYMBOL_GPL(nvme_stop_ctrl
);
4332 void nvme_start_ctrl(struct nvme_ctrl
*ctrl
)
4334 nvme_start_keep_alive(ctrl
);
4336 nvme_enable_aen(ctrl
);
4338 if (ctrl
->queue_count
> 1) {
4339 nvme_queue_scan(ctrl
);
4340 nvme_start_queues(ctrl
);
4343 EXPORT_SYMBOL_GPL(nvme_start_ctrl
);
4345 void nvme_uninit_ctrl(struct nvme_ctrl
*ctrl
)
4347 nvme_hwmon_exit(ctrl
);
4348 nvme_fault_inject_fini(&ctrl
->fault_inject
);
4349 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
4350 cdev_device_del(&ctrl
->cdev
, ctrl
->device
);
4351 nvme_put_ctrl(ctrl
);
4353 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl
);
4355 static void nvme_free_cels(struct nvme_ctrl
*ctrl
)
4357 struct nvme_effects_log
*cel
;
4360 xa_for_each(&ctrl
->cels
, i
, cel
) {
4361 xa_erase(&ctrl
->cels
, i
);
4365 xa_destroy(&ctrl
->cels
);
4368 static void nvme_free_ctrl(struct device
*dev
)
4370 struct nvme_ctrl
*ctrl
=
4371 container_of(dev
, struct nvme_ctrl
, ctrl_device
);
4372 struct nvme_subsystem
*subsys
= ctrl
->subsys
;
4374 if (!subsys
|| ctrl
->instance
!= subsys
->instance
)
4375 ida_simple_remove(&nvme_instance_ida
, ctrl
->instance
);
4377 nvme_free_cels(ctrl
);
4378 nvme_mpath_uninit(ctrl
);
4379 __free_page(ctrl
->discard_page
);
4382 mutex_lock(&nvme_subsystems_lock
);
4383 list_del(&ctrl
->subsys_entry
);
4384 sysfs_remove_link(&subsys
->dev
.kobj
, dev_name(ctrl
->device
));
4385 mutex_unlock(&nvme_subsystems_lock
);
4388 ctrl
->ops
->free_ctrl(ctrl
);
4391 nvme_put_subsystem(subsys
);
4395 * Initialize a NVMe controller structures. This needs to be called during
4396 * earliest initialization so that we have the initialized structured around
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls. The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
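
/*
 * Usage sketch: a transport driver typically calls nvme_init_ctrl() early in
 * its probe path, before creating the admin queue, and on removal pairs it
 * with nvme_uninit_ctrl() plus a final nvme_put_ctrl(); the controller memory
 * itself is released through the ->free_ctrl() callback once the last
 * reference is dropped.
 */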
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
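
/*
 * Usage note (sketch): this is typically called once a controller is deemed
 * dead (for example after a failed reset), so that outstanding and future I/O
 * fails quickly instead of blocking on queues that will never make progress.
 */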
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
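
/*
 * Usage sketch: the freeze helpers are normally used together around a
 * controller reset, roughly:
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_wait_freeze_timeout(ctrl, timeout);
 *	... reconfigure the I/O queues ...
 *	nvme_unfreeze(ctrl);
 *
 * nvme_wait_freeze() is the untimed variant for paths that must not proceed
 * until every namespace queue is fully frozen.
 */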
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
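
/*
 * Usage note (sketch): nvme_stop_queues()/nvme_start_queues() quiesce and
 * unquiesce the namespace request queues; transports use them to stop new
 * dispatch while connectivity is being torn down or re-established, without
 * actually freezing the queues.
 */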
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	return 0;

unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}
static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);