// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
	/*
	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
	 */
	revalidate_disk(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		ret = -EBUSY;
	if (!ret)
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
	return ret;
}

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
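/*
 * Note on the CRD arithmetic above: the two CRD bits sit at bits 12:11 of
 * the packed status word, so the mask-and-shift yields a value of 0-3 that
 * selects one of the three Command Retry Delay Times reported by Identify
 * Controller. crdt[] is in units of 100 milliseconds per the spec, hence
 * the multiply by 100 to get the millisecond delay that
 * blk_mq_delay_kick_requeue_list() expects. For example, CRD = 1 with
 * crdt[0] = 10 requeues the command after 1000 ms.
 */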
void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	trace_nvme_complete_rq(req);

	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_retry_req(req);
			return;
		}
	}

	nvme_trace_bio_complete(req, status);
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
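/*
 * Summary of the transitions permitted by the state machine above
 * (new state <- allowed old states):
 *
 *	LIVE       <- NEW, RESETTING, CONNECTING
 *	RESETTING  <- NEW, LIVE
 *	CONNECTING <- NEW, RESETTING
 *	DELETING   <- LIVE, RESETTING, CONNECTING
 *	DEAD       <- DELETING
 *
 * Any other combination leaves 'changed' false and the state untouched.
 */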
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
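/*
 * Worked example for the mapping above, assuming the standard rw_hint
 * values (WRITE_LIFE_NOT_SET = 0 ... WRITE_LIFE_EXTREME = 5): a bio
 * written with WRITE_LIFE_SHORT (2) is decremented to stream ID 1, which
 * lands in the DSPEC field (dsmgmt bits 31:16) as 1 << 16, with
 * NVME_RW_DTYPE_STREAMS set in the control word to mark the write as a
 * streams directive.
 */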
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail allocation of our range, fallback to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
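/*
 * Sizing note: each struct nvme_dsm_range is 16 bytes, so the worst-case
 * allocation above is 16 * NVME_DSM_MAX_RANGES (256) = 4KiB. That is why a
 * single pre-allocated discard page per controller suffices as the
 * fallback, and why nvme_config_discard() can BUILD_BUG_ON() the same
 * relationship.
 */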
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
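/*
 * The Starting LBA / Number of Logical Blocks encoding used above is
 * 0's based per the NVMe spec: e.g. a 4096-byte request on a 512-byte
 * formatted namespace (lba_shift = 9) covers 8 sectors and is encoded as
 * length = (4096 >> 9) - 1 = 7.
 */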
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	memset(cmd, 0, sizeof(*cmd));
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
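/*
 * nvme_submit_sync_cmd() is the convenience wrapper most callers want: no
 * completion result dword, default timeout, any queue ID, no polling.
 * nvme_identify_ctrl() below is a typical caller; anything that needs the
 * completion result, a specific queue, head-of-queue insertion or polling
 * goes through __nvme_submit_sync_cmd() directly.
 */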
static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
	}

	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
			unsigned int dword11, void *buffer, size_t buflen,
			u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
			unsigned int dword11, void *buffer, size_t buflen,
			u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
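/*
 * Worked example for the encoding above: Set Features / Number of Queues
 * takes 0's based counts, NSQR in cdw11 bits 15:0 and NCQR in bits 31:16,
 * so requesting 8 queues yields q_count = 0x00070007. The completion
 * result returns the allocated counts the same way; if the controller
 * grants 4 SQs and 6 CQs (result = 0x00050003), min(3, 5) + 1 = 4 I/O
 * queues are usable.
 */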
#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}
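/*
 * The effects bits used above follow the Commands Supported and Effects
 * log format: CSUPP (bit 0, command supported), LBCC (bit 1, logical block
 * content change), and the CSE field (bits 18:16) describing command
 * submission/execution restrictions. Format NVM rewrites user data, hence
 * LBCC; both Format and Sanitize restrict concurrent execution, hence
 * CSE_MASK, which is what makes nvme_passthru_start() below freeze I/O
 * around them.
 */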
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata,
			cmd.metadata_len, 0, &result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed request on another controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		struct nvme_ns *ns;

		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
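/*
 * The fake geometry above gives 64 heads * 32 sectors = 2048 sectors per
 * cylinder, so the cylinder count is simply the capacity (in 512-byte
 * sectors) shifted right by 11.
 */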
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
	u64 max_blocks;

	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		return;
	/*
	 * Even though the NVMe spec explicitly states that MDTS is not
	 * applicable to write-zeroes:- "The restriction does not apply to
	 * commands that do not transfer data between the host and the
	 * controller (e.g., Write Uncorrectable or Write Zeroes command).",
	 * in order to be more cautious use the controller's max_hw_sectors
	 * value to configure the maximum sectors for write-zeroes, which is
	 * configured based on the controller's MDTS field in
	 * nvme_init_identify() if available.
	 */
	if (ns->ctrl->max_hw_sectors == UINT_MAX)
		max_blocks = (u64)USHRT_MAX + 1;
	else
		max_blocks = ns->ctrl->max_hw_sectors + 1;

	blk_queue_max_write_zeroes_sectors(disk->queue,
					   nvme_lba_to_sect(ns, max_blocks));
}

static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	int ret = 0;

	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat an error as fatal, as we potentially
		 * already have a NGUID or EUI-64
		 */
		ret = nvme_identify_ns_descs(ctrl, nsid, ids);
		if (ret)
			dev_warn(ctrl->device,
				 "Identify Descriptors failed (%d)\n", ret);
	}
	return ret;
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & (1 << 1) && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	} else {
		atomic_bs = bs;
	}
	phys_bs = bs;
	io_opt = bs;
	if (id->nsfeat & (1 << 4)) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs *= 1 + le16_to_cpu(id->npwg);
		/* NOWS = Namespace Optimal Write Size */
		io_opt *= 1 + le16_to_cpu(id->nows);
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
	    ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	set_capacity(disk, capacity);

	nvme_config_discard(disk, ns);
	nvme_config_write_zeroes(disk, ns);

	if (id->nsattr & (1 << 0))
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	ns->noiob = le16_to_cpu(id->noiob);
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
	/* the PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	if (ns->noiob)
		nvme_set_chunk_size(ns);
	nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk) {
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
		revalidate_disk(ns->head->disk);
	}
#endif
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	struct nvme_ns_ids ids;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
	if (ret)
		goto out;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto free_id;
	}

	__nvme_revalidate_disk(disk, id);
	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (ret)
		goto free_id;

	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		ret = -ENODEV;
	}

free_id:
	kfree(id);
out:
	/*
	 * Only fail the function if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 */
	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
		ret = 0;
	else if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
*bdev
, u32 cdw10
,
1922 u64 key
, u64 sa_key
, u8 op
)
1924 struct nvme_ns_head
*head
= NULL
;
1926 struct nvme_command c
;
1928 u8 data
[16] = { 0, };
1930 ns
= nvme_get_ns_from_disk(bdev
->bd_disk
, &head
, &srcu_idx
);
1932 return -EWOULDBLOCK
;
1934 put_unaligned_le64(key
, &data
[0]);
1935 put_unaligned_le64(sa_key
, &data
[8]);
1937 memset(&c
, 0, sizeof(c
));
1938 c
.common
.opcode
= op
;
1939 c
.common
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
1940 c
.common
.cdw10
= cpu_to_le32(cdw10
);
1942 ret
= nvme_submit_sync_cmd(ns
->queue
, &c
, data
, 16);
1943 nvme_put_ns_from_disk(head
, srcu_idx
);
1947 static int nvme_pr_register(struct block_device
*bdev
, u64 old
,
1948 u64
new, unsigned flags
)
1952 if (flags
& ~PR_FL_IGNORE_KEY
)
1955 cdw10
= old
? 2 : 0;
1956 cdw10
|= (flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0;
1957 cdw10
|= (1 << 30) | (1 << 31); /* PTPL=1 */
1958 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_register
);
1961 static int nvme_pr_reserve(struct block_device
*bdev
, u64 key
,
1962 enum pr_type type
, unsigned flags
)
1966 if (flags
& ~PR_FL_IGNORE_KEY
)
1969 cdw10
= nvme_pr_type(type
) << 8;
1970 cdw10
|= ((flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0);
1971 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_acquire
);
1974 static int nvme_pr_preempt(struct block_device
*bdev
, u64 old
, u64
new,
1975 enum pr_type type
, bool abort
)
1977 u32 cdw10
= nvme_pr_type(type
) << 8 | (abort
? 2 : 1);
1978 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_acquire
);
1981 static int nvme_pr_clear(struct block_device
*bdev
, u64 key
)
1983 u32 cdw10
= 1 | (key
? 1 << 3 : 0);
1984 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_register
);
1987 static int nvme_pr_release(struct block_device
*bdev
, u64 key
, enum pr_type type
)
1989 u32 cdw10
= nvme_pr_type(type
) << 8 | (key
? 1 << 3 : 0);
1990 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_release
);
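/*
 * cdw10 layout for the reservation helpers above, per the NVMe Reservation
 * Register/Acquire/Release commands: the action code goes in bits 2:0
 * (e.g. register = 0, replace = 2; acquire = 0, preempt = 1,
 * preempt-and-abort = 2), IEKEY (ignore existing key) is bit 3, the
 * reservation type is in bits 15:8, and for Register the CPTPL field in
 * bits 31:30 is set to 11b above so the reservation persists through
 * power loss.
 */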
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
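/*
 * cdw10 for Security Send/Receive packs the Security Protocol (SECP) into
 * bits 31:24 and the protocol-specific field (SPSP) into bits 23:8, which
 * is what the shifts above construct; cdw11 carries the transfer length.
 */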
#endif /* CONFIG_BLK_SED_OPAL */
static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk = nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}
2088 * If the device has been passed off to us in an enabled state, just clear
2089 * the enabled bit. The spec says we should set the 'shutdown notification
2090 * bits', but doing so may cause the device to complete commands to the
2091 * admin queue ... and we don't know what memory that might be pointing at!
2093 int nvme_disable_ctrl(struct nvme_ctrl
*ctrl
)
2097 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
2098 ctrl
->ctrl_config
&= ~NVME_CC_ENABLE
;
2100 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
2104 if (ctrl
->quirks
& NVME_QUIRK_DELAY_BEFORE_CHK_RDY
)
2105 msleep(NVME_QUIRK_DELAY_AMOUNT
);
2107 return nvme_wait_ready(ctrl
, ctrl
->cap
, false);
2109 EXPORT_SYMBOL_GPL(nvme_disable_ctrl
);
2111 int nvme_enable_ctrl(struct nvme_ctrl
*ctrl
)
2114 * Default to a 4K page size, with the intention to update this
2115 * path in the future to accomodate architectures with differing
2116 * kernel and IO page sizes.
2118 unsigned dev_page_min
, page_shift
= 12;
2121 ret
= ctrl
->ops
->reg_read64(ctrl
, NVME_REG_CAP
, &ctrl
->cap
);
2123 dev_err(ctrl
->device
, "Reading CAP failed (%d)\n", ret
);
2126 dev_page_min
= NVME_CAP_MPSMIN(ctrl
->cap
) + 12;
2128 if (page_shift
< dev_page_min
) {
2129 dev_err(ctrl
->device
,
2130 "Minimum device page size %u too large for host (%u)\n",
2131 1 << dev_page_min
, 1 << page_shift
);
2135 ctrl
->page_size
= 1 << page_shift
;
2137 ctrl
->ctrl_config
= NVME_CC_CSS_NVM
;
2138 ctrl
->ctrl_config
|= (page_shift
- 12) << NVME_CC_MPS_SHIFT
;
2139 ctrl
->ctrl_config
|= NVME_CC_AMS_RR
| NVME_CC_SHN_NONE
;
2140 ctrl
->ctrl_config
|= NVME_CC_IOSQES
| NVME_CC_IOCQES
;
2141 ctrl
->ctrl_config
|= NVME_CC_ENABLE
;
2143 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
2146 return nvme_wait_ready(ctrl
, ctrl
->cap
, true);
2148 EXPORT_SYMBOL_GPL(nvme_enable_ctrl
);
2150 int nvme_shutdown_ctrl(struct nvme_ctrl
*ctrl
)
2152 unsigned long timeout
= jiffies
+ (ctrl
->shutdown_timeout
* HZ
);
2156 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
2157 ctrl
->ctrl_config
|= NVME_CC_SHN_NORMAL
;
2159 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
2163 while ((ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
)) == 0) {
2164 if ((csts
& NVME_CSTS_SHST_MASK
) == NVME_CSTS_SHST_CMPLT
)
2168 if (fatal_signal_pending(current
))
2170 if (time_after(jiffies
, timeout
)) {
2171 dev_err(ctrl
->device
,
2172 "Device shutdown incomplete; abort shutdown\n");
2179 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl
);
2181 static void nvme_set_queue_limits(struct nvme_ctrl
*ctrl
,
2182 struct request_queue
*q
)
2186 if (ctrl
->max_hw_sectors
) {
2188 (ctrl
->max_hw_sectors
/ (ctrl
->page_size
>> 9)) + 1;
2190 max_segments
= min_not_zero(max_segments
, ctrl
->max_segments
);
2191 blk_queue_max_hw_sectors(q
, ctrl
->max_hw_sectors
);
2192 blk_queue_max_segments(q
, min_t(u32
, max_segments
, USHRT_MAX
));
2194 if ((ctrl
->quirks
& NVME_QUIRK_STRIPE_SIZE
) &&
2195 is_power_of_2(ctrl
->max_hw_sectors
))
2196 blk_queue_chunk_sectors(q
, ctrl
->max_hw_sectors
);
2197 blk_queue_virt_boundary(q
, ctrl
->page_size
- 1);
2198 if (ctrl
->vwc
& NVME_CTRL_VWC_PRESENT
)
2200 blk_queue_write_cache(q
, vwc
, vwc
);
2203 static int nvme_configure_timestamp(struct nvme_ctrl
*ctrl
)
2208 if (!(ctrl
->oncs
& NVME_CTRL_ONCS_TIMESTAMP
))
2211 ts
= cpu_to_le64(ktime_to_ms(ktime_get_real()));
2212 ret
= nvme_set_features(ctrl
, NVME_FEAT_TIMESTAMP
, 0, &ts
, sizeof(ts
),
2215 dev_warn_once(ctrl
->device
,
2216 "could not set timestamp (%d)\n", ret
);
2220 static int nvme_configure_acre(struct nvme_ctrl
*ctrl
)
2222 struct nvme_feat_host_behavior
*host
;
2225 /* Don't bother enabling the feature if retry delay is not reported */
2229 host
= kzalloc(sizeof(*host
), GFP_KERNEL
);
2233 host
->acre
= NVME_ENABLE_ACRE
;
2234 ret
= nvme_set_features(ctrl
, NVME_FEAT_HOST_BEHAVIOR
, 0,
2235 host
, sizeof(*host
), NULL
);

static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */
	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1)
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		else
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
	return ret;
}
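
/*
 * Worked example of the 2% heuristic above (illustrative, assumed values):
 * a non-operational state with entry_lat = 1000us and exit_lat = 400us has
 * total_latency_us = 1400.  transition_ms = (1400 + 19) / 20 = 70, i.e. we
 * idle for 70ms = 50 * (enlat + exlat) before transitioning, so at most
 * 1400us out of every ~71.4ms (about 2%) is spent transitioning.
 */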

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to idle
		 * LiteON has resolved the problem in future firmware
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
	}
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}
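
/*
 * Example (hypothetical identify data): with id->mn holding
 * "THNSF5256GPUK TOSHIBA" space-padded to its full 40 bytes,
 * string_matches() succeeds for the NULL-padded quirk string
 * "THNSF5256GPUK TOSHIBA": the prefix compares equal and every
 * remaining identify byte is ' '.
 */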

static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}

		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
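
/*
 * Example of a generated fake NQN (hypothetical IDs): vid 0x8086 and
 * ssvid 0x8086 produce "nqn.2014.08.org.nvmexpress:80868086", followed by
 * the raw 20-byte serial number and 40-byte model string, with the rest
 * of subsys->subnqn zeroed.
 */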

static void nvme_release_subsystem(struct device *dev)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	if (subsys->instance >= 0)
		ida_simple_remove(&nvme_instance_ida, subsys->instance);
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sprintf(buf, "%.*s\n",					\
		       (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_ctrl *tmp;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
		if (tmp->state == NVME_CTRL_DELETING ||
		    tmp->state == NVME_CTRL_DEAD)
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device));
			return false;
		}

		if ((id->cmic & (1 << 1)) ||
		    (ctrl->opts && ctrl->opts->discovery_nqn))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
	}

	return true;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;

	subsys->instance = -1;
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;
	subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
	subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
	subsys->dev.groups = nvme_subsys_attrs_groups;
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		put_device(&subsys->dev);
		subsys = found;

		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
			ret = -EINVAL;
			goto out_put_subsystem;
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			put_device(&subsys->dev);
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		goto out_put_subsystem;
	}

	if (!found)
		subsys->instance = ctrl->instance;
	ctrl->subsys = subsys;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
}
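
/*
 * Lifetime note (descriptive, not from the original comments): when
 * __nvme_find_get_subsystem() returns an existing subsystem, the freshly
 * allocated one is dropped via put_device() and the controller joins the
 * found subsystem, which already holds the reference taken by
 * kref_get_unless_zero(); each error path above releases exactly one of
 * those references.
 */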

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		 void *log, size_t size, u64 offset)
{
	struct nvme_command c = { };
	unsigned long dwlen = size / 4 - 1;

	c.get_log_page.opcode = nvme_admin_get_log_page;
	c.get_log_page.nsid = cpu_to_le32(nsid);
	c.get_log_page.lid = log_page;
	c.get_log_page.lsp = lsp;
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
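
/*
 * Example of the dword-count split above (assumed size): for a 16 KiB log,
 * size / 4 - 1 = 4095 dwords (zero's based), so numdl = 4095 & 0xffff = 4095
 * and numdu = 4095 >> 16 = 0; byte offsets beyond 4 GiB spill into lpou.
 */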

static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
	if (!ctrl->effects)
		return 0;

	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
			ctrl->effects, sizeof(*ctrl->effects), 0);
	if (ret) {
		kfree(ctrl->effects);
		ctrl->effects = NULL;
	}
	return ret;
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	int ret, page_shift;
	u32 max_hw_sectors;
	bool prev_apst_enabled;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl);
		if (ret < 0)
			goto out_free;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

	if (!ctrl->identified) {
		int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpu(id->oncs);
	ctrl->mtfa = le16_to_cpu(id->mtfa);
	ctrl->oaes = le32_to_cpu(id->oaes);
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			ret = -EINVAL;
			goto out_free;
		}

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init(ctrl, id);
	kfree(id);

	if (ret < 0)
		return ret;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

	if (!ctrl->identified)
		nvme_hwmon_init(ctrl);

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_fops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);
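
/*
 * Example fallback WWID (hypothetical device): with no UUID, NGUID or
 * EUI-64 reported, a device with vendor_id 0x1b36, serial "S123", model
 * "QEMU NVMe Ctrl" and nsid 1 reads roughly
 * "nvme.1b36-<serial as hex>-<model as hex>-00000001", since %*phN renders
 * the trimmed serial and model buffers as hex bytes.
 */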

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ids->nguid);
	}
	return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
#ifdef CONFIG_NVM
	&nvme_nvm_attr_group,
#endif
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%.*s\n",						\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sprintf(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int __nvme_check_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_head *new)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(&new->ids) &&
		    !list_empty(&h->list) &&
		    nvme_ns_ids_equal(&new->ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns *id)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
	kref_init(&head->ref);

	ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
	if (ret)
		goto out_cleanup_srcu;

	ret = __nvme_check_ids(ctrl->subsys, head);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
		struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	bool is_shared = id->nmic & (1 << 0);
	struct nvme_ns_head *head = NULL;
	int ret = 0;

	mutex_lock(&ctrl->subsys->lock);
	if (is_shared)
		head = __nvme_find_ns_head(ctrl->subsys, nsid);
	if (!head) {
		head = nvme_alloc_ns_head(ctrl, nsid, id);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		struct nvme_ns_ids ids;

		ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
		if (ret)
			goto out_unlock;

		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	list_add_tail(&ns->siblings, &head->list);
	ns->head = head;

out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->head->ns_id - nsb->head->ns_id;
}
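
/*
 * ns_cmp() is the list_sort() comparator used at the end of scan work;
 * returning nsa->head->ns_id - nsb->head->ns_id keeps ctrl->namespaces in
 * ascending nsid order, which nvme_find_get_ns() below relies on for its
 * early break once ns_id exceeds the nsid it is looking for.
 */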

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}

static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return -ENOMEM;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue)) {
		ret = PTR_ERR(ns->queue);
		goto out_free_ns;
	}

	if (ctrl->opts && ctrl->opts->data_digest)
		ns->queue->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	ret = nvme_identify_ns(ctrl, nsid, &id);
	if (ret)
		goto out_free_queue;

	if (id->ncap == 0) {
		ret = -EINVAL;
		goto out_free_id;
	}

	ret = nvme_init_ns_head(ns, nsid, id);
	if (ret)
		goto out_free_id;
	nvme_setup_streams_ns(ctrl, ns);
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);

	disk = alloc_disk_node(0, node);
	if (!disk) {
		ret = -ENOMEM;
		goto out_unlink_ns;
	}

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = flags;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		ret = nvme_nvm_register(ns, disk_name, node);
		if (ret) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_put_disk;
		}
	}

	down_write(&ctrl->namespaces_rwsem);
	list_add_tail(&ns->list, &ctrl->namespaces);
	up_write(&ctrl->namespaces_rwsem);

	nvme_get_ctrl(ctrl);

	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);

	nvme_mpath_add_disk(ns, id);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
	kfree(id);

	return 0;
 out_put_disk:
	put_disk(ns->disk);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_free_id:
	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ret;
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	nvme_fault_inject_fini(&ns->fault_inject);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ns->ctrl->subsys->lock);
	synchronize_rcu(); /* guarantee not available in head->list */
	nvme_mpath_clear_current_path(ns);
	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
	}

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	nvme_mpath_check_last_path(ns);
	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0;
	unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
	int ret = 0;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}
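
/*
 * Worked example for the paged scan above (assumed namespace count): with
 * nn = 2500 active namespaces, num_lists = DIV_ROUND_UP(2500, 1024) = 3
 * Identify list pages are fetched, each holding up to 1024 __le32 nsids;
 * a zero nsid entry terminates the scan early, and the gap-walk removes
 * any stale namespaces whose nsids no longer appear.
 */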

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
			log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	mutex_lock(&ctrl->scan_lock);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto out_free_id;
	}
	nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
	mutex_unlock(&ctrl->scan_lock);
	kfree(id);
	down_write(&ctrl->namespaces_rwsem);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	up_write(&ctrl->namespaces_rwsem);
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);
	ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
			sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}
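
/*
 * Timeout example (assumed MTFA): id->mtfa is reported in 100ms units, so
 * mtfa = 50 gives a 50 * 100ms = 5000ms activation window before the
 * controller is reset; without a reported MTFA we fall back to the admin
 * command timeout in seconds.
 */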

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
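
/*
 * Result-dword layout example (illustrative): for result = 0x00000102,
 * aer_type = result & 0x07 = 2 (NVME_AER_NOTICE) and aer_notice_type =
 * (result & 0xff00) >> 8 = 0x01, which the switch above handles as
 * NVME_AER_NOTICE_FW_ACT_STARTING.
 */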

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kas)
		nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (subsys && ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	kfree(ctrl->effects);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));

	return 0;
out_free_name:
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);

	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);