/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */
#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif
/*
 * Default to a 4K page size, with the intention to update this path in the
 * future to accommodate architectures with differing kernel and IO page
 * sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),
	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),
	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),
	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),
};
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};
/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV
enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};
static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
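
/*
 * Illustrative sketch (hypothetical "foo" transport, not part of this
 * header): because nvme_req() simply casts the blk-mq PDU, every
 * transport must lay out its per-request data like so:
 *
 *	struct foo_request {
 *		struct nvme_request req;	// must be the first member
 *		// ... transport-private state ...
 *	};
 */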
static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
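
/*
 * Example (illustrative): the admin queue has no queuedata, so admin
 * requests report qid 0; a request on the first I/O hardware context
 * (queue_num 0) reports qid 1, matching NVMe's numbering where queue 0
 * is the admin queue.
 */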
/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};
struct nvme_ctrl {
	enum nvme_ctrl_state state;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	u32 ctrl_config;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	atomic_t abort_limit;
	u32 sgls;
	unsigned int shutdown_timeout;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;

	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};
struct nvme_subsystem {
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			firmware_rev[8];
	u8			cmic;
	u16			awupf;	/* 0's based awupf value. */
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};
/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};
/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct nvme_effects_log	*effects;

	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};
static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}
enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
#endif
	struct list_head siblings;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct device cdev_device;

	struct nvme_fault_inject fault_inject;
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
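
/*
 * Example (illustrative): a namespace formatted with protection
 * information and exactly 8 bytes of metadata per block (the size of a
 * T10 PI tuple) qualifies; larger metadata formats carry the PI plus
 * opaque bytes, which this helper treats as having no usable PI.
 */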
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)
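
/*
 * Worked example (illustrative): with genctr 5 and blk-mq tag 0x123,
 * nvme_cid() yields nvme_cid_install_genctr(5) | 0x123 == 0x5123; on
 * completion, nvme_genctr_from_cid(0x5123) == 0x5 and
 * nvme_tag_from_cid(0x5123) == 0x123, which lets nvme_find_rq() reject
 * completions whose generation counter no longer matches.
 */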
static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}
static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;

	/* writing the ASCII string "NVMe" (0x4E564D65) triggers an NSSR */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}
/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
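
/*
 * Worked example (illustrative): with 4096-byte logical blocks,
 * ns->lba_shift is 12 and both conversions shift by 12 - 9 = 3:
 * nvme_sect_to_lba() maps 512B sector 8 to LBA 1, and
 * nvme_lba_to_sect() maps LBA 1 back to sector 8.
 */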
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
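
/*
 * Worked example (illustrative): a 4096-byte buffer is 1024 dwords,
 * encoded for the spec's 0-based NUMD fields as (4096 >> 2) - 1 = 1023.
 */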
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}
static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
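
/*
 * Example (illustrative): NVME_SC_HOST_PATH_ERROR (0x370) has status
 * code type 3h ("path related status"), so it matches here, while a
 * media error with status code type 2h does not.
 */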
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
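
/*
 * Rationale (illustrative): AEN commands are submitted only on the admin
 * queue (qid 0) and use command ids at or above NVME_AQ_BLK_MQ_DEPTH,
 * beyond the tag range blk-mq hands out, so a completion there cannot
 * belong to a regular request.
 */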
void nvme_complete_rq(struct request *req);
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
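
/*
 * Note (illustrative): the fast path accepts everything while the
 * controller is LIVE; a fabrics controller in the DELETING state keeps
 * requests flowing only while the queue itself is still live, so
 * teardown I/O can complete; all other states fall through to the
 * slow-path checks in __nvme_check_ready().
 */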
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
		int *flags)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
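
/*
 * Note (an interpretation of the check above): bits 1:0 of the Identify
 * Controller SGLS field advertise SGL support for the NVM command set,
 * so a nonzero value in either bit means SGL-described data transfers
 * can be used.
 */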
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
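
/*
 * Note (illustrative): the NVME_CC_CSS_CSI encoding of CC.CSS enables all
 * I/O command sets the controller supports, so a controller started this
 * way may expose more than the NVM command set (e.g. Zoned Namespaces);
 * this helper reports whether it was started in that mode.
 */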