/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than
	 * 0 or 1 correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),
};
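
/*
 * Illustrative sketch: a transport driver typically attaches these quirk
 * bits to a device through its PCI id table's driver_data, and the core
 * then tests them on ctrl->quirks.  The table name below is hypothetical:
 *
 *	static const struct pci_device_id example_id_table[] = {
 *		{ PCI_DEVICE(0x1c58, 0x0003),
 *			.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *		{ 0, }
 *	};
 */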
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u16			status;
};

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
};
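
/*
 * Layout sketch (struct and field names hypothetical): a transport's
 * per-request data embeds struct nvme_request as its first member, so
 * that blk_mq_rq_to_pdu() and the nvme_req() helper below both resolve
 * to the same address:
 *
 *	struct example_iod {
 *		struct nvme_request	req;	// must come first
 *		int			nents;
 *		dma_addr_t		first_dma;
 *	};
 */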
static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000
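
/*
 * Usage sketch (minimal, mirroring a wait-for-ready path): apply the
 * delay before the first NVME_CSTS_RDY poll when the quirk is set:
 *
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 */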
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct work_struct reset_work;

	struct opal_dev *opal_dev;

	char firmware_rev[8];
	char subnqn[NVMF_NQN_SIZE];

	atomic_t abort_limit;
	unsigned int shutdown_timeout;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct work_struct fw_act_work;

	/* Power saving configuration */
	u64 ps_max_latency_us;

	struct nvmf_ctrl_options *opts;
};
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct nvm_dev *ndev;

	int lba_shift;
	unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
};
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
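
/*
 * Minimal sketch of a transport filling in these ops; names prefixed
 * "example_" are hypothetical.  A memory-mapped transport such as PCIe
 * would implement the reg_* callbacks against its BAR:
 *
 *	static int example_reg_read32(struct nvme_ctrl *ctrl, u32 off,
 *			u32 *val)
 *	{
 *		*val = readl(to_example_dev(ctrl)->bar + off);
 *		return 0;
 *	}
 *
 *	static const struct nvme_ctrl_ops example_ctrl_ops = {
 *		.name		= "example",
 *		.module		= THIS_MODULE,
 *		.flags		= NVME_F_METADATA_SUPPORTED,
 *		.reg_read32	= example_reg_read32,
 *	};
 */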
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}
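
/*
 * Usage sketch (timeout handling hypothetical): poll for readiness after
 * enabling the controller:
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(ready_timeout_ms);
 *
 *	while (!nvme_ctrl_ready(ctrl)) {
 *		if (time_after(jiffies, timeout))
 *			return -ENODEV;
 *		msleep(100);
 *	}
 */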
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;

	/* Writing the ASCII string "NVMe" to NSSR triggers the reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	/* Convert a 512-byte sector number into the namespace's LBA size */
	return sector >> (ns->lba_shift - 9);
}
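
/*
 * Worked example: for a namespace formatted with 4096-byte LBAs
 * (lba_shift == 12), 512-byte sector 80 maps to block 80 >> (12 - 9) == 10.
 */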
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}
static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	blk_mq_complete_request(req);
}
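
/*
 * Completion-path sketch (variable names hypothetical): a transport's CQE
 * handler looks the request up by command id and ends it with the status
 * and result taken straight from the completion entry:
 *
 *	struct request *req = blk_mq_tag_to_rq(tags, cqe->command_id);
 *
 *	nvme_end_request(req, cqe->status, cqe->result);
 */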
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
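
/*
 * Transition sketch: reset paths gate themselves on a successful state
 * change before queueing work on nvme_wq:
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return -EBUSY;
 *	queue_work(nvme_wq, &ctrl->reset_work);
 */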
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);
#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
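
/*
 * Freeze/unfreeze sketch (ordering as a PCIe-style reset path would use
 * it; error handling omitted):
 *
 *	nvme_start_freeze(ctrl);	// stop new I/O from entering
 *	nvme_stop_queues(ctrl);		// quiesce the hardware queues
 *	// ...tear down and re-establish the controller...
 *	nvme_start_queues(ctrl);
 *	nvme_wait_freeze(ctrl);		// wait for queue usage to drain
 *	nvme_unfreeze(ctrl);
 */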
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
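
/*
 * Sync-command sketch: build an Identify Controller command and submit it
 * on the admin queue (error handling omitted; "id" is assumed to be a
 * buffer of sizeof(struct nvme_id_ctrl) bytes):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */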
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}

static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}

static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}

static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
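
/*
 * Usage sketch (hypothetical sysfs attribute): show() callbacks on the
 * namespace device use this helper to get back to the nvme_ns:
 *
 *	static ssize_t name_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 *
 *		return sprintf(buf, "%s\n", ns->disk->disk_name);
 *	}
 */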
int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */