/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),
};
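
/*
 * Quirk bits are normally attached through the transport driver's device
 * ID table. A minimal sketch, following the pattern used by the PCI
 * transport (the device ID shown is the one called out below for
 * NVME_QUIRK_DELAY_AMOUNT; neighboring table entries are elided):
 *
 *	static const struct pci_device_id nvme_id_table[] = {
 *		{ PCI_DEVICE(0x1c58, 0x0003),
 *			.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *		// ...
 *	};
 *
 * The driver_data bits then land in nvme_ctrl.quirks.
 */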

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};
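
/*
 * A sketch of the embedding rule stated above: a transport places the
 * nvme_request at the start of its per-request PDU so the
 * blk_mq_rq_to_pdu() cast in nvme_req() below is valid. The names
 * "foo_request" and "foo_private" are hypothetical, not from any real
 * transport:
 *
 *	struct foo_request {
 *		struct nvme_request	req;	// must stay first
 *		void			*foo_private;
 *	};
 */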

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
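
/*
 * Note the +1 above: queue ID 0 is reserved for the admin queue, so the
 * 0-based blk-mq hardware context index maps to NVMe I/O queue IDs
 * starting at 1.
 */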

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */

	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	bool subsystem;
	atomic_t abort_limit;
	unsigned int shutdown_timeout;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;

	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;
};

struct nvme_subsystem {
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			firmware_rev[8];
	u8			cmic;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	int			instance;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct nvme_ns __rcu	*current_path[];
#endif
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject {
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
};
#endif

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct nvme_ns_head *head;

	int lba_shift;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct nvme_fault_inject fault_inject;
#endif
};

struct nvme_ctrl_ops {
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_ns *ns);
void nvme_fault_inject_fini(struct nvme_ns *ns);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif
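
/*
 * Writing the ASCII string "NVMe" (0x4E564D65) to the NSSR register
 * triggers an NVM subsystem reset, per the NVMe specification.
 */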
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
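
/*
 * Convert a 512-byte block layer sector number to the namespace's native
 * LBA number; ns->lba_shift is log2 of the formatted LBA size, hence the
 * "- 9" for the block layer's 512-byte units.
 */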
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
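
/*
 * In a completion queue entry the low bit of the status field is the
 * phase tag, which is why the status is shifted right by one below before
 * it is stored.
 */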
static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	blk_mq_complete_request(req);
}
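
/*
 * The controller's lifetime follows its character device: the helpers
 * below take and drop references on ctrl->device, and dropping the last
 * reference is expected to free the controller through ops->free_ctrl.
 */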
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
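
/*
 * A minimal usage sketch for the synchronous helpers (error handling
 * elided; this mirrors the Identify Controller pattern in core.c):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */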
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		void *log, size_t size, u64 offset);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
void nvme_mpath_clear_current_path(struct nvme_ns *ns);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}
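
/*
 * Typical caller pattern for nvme_find_path(): the multipath submission
 * path looks up a usable namespace under head->srcu before remapping the
 * bio (a simplified sketch of the pattern in multipath.c):
 *
 *	srcu_idx = srcu_read_lock(&head->srcu);
 *	ns = nvme_find_path(head);
 *	if (ns)
 *		// ... remap and submit the bio to ns->queue ...
 *	srcu_read_unlock(&head->srcu, srcu_idx);
 */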

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as separate devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
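
/*
 * With CONFIG_NVME_MULTIPATH the shared disk is instead named after the
 * subsystem instance, so all paths to one namespace surface as a single
 * block device.
 */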

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
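
/*
 * CMIC bit 3 checked below is the ANA reporting capability bit from
 * Identify Controller; a multi-port, ANA-capable device still works
 * without CONFIG_NVME_MULTIPATH, but only as independent per-path devices.
 */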
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void __exit nvme_core_exit(void);