drivers/nvme/host/nvme.h
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>

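/*
 * nvme_io_timeout and admin_timeout are module parameters given in seconds;
 * multiplying by HZ converts them to jiffies for the block layer timeouts.
 */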
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
        NVME_NS_LBA		= 0,
        NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
        /*
         * Prefers I/O aligned to a stripe size specified in a vendor
         * specific Identify field.
         */
        NVME_QUIRK_STRIPE_SIZE = (1 << 0),

        /*
         * The controller doesn't handle Identify values other than 0 or 1
         * correctly.
         */
        NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

        /*
         * The controller deterministically returns 0's on reads to
         * logical blocks that deallocate was called on.
         */
        NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),

        /*
         * The controller needs a delay before it starts checking the device
         * readiness, which is done by reading the NVME_CSTS_RDY bit.
         */
        NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),

        /*
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST = (1 << 4),

        /*
         * The deepest sleep state should not be used.
         */
        NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),

        /*
         * Supports the LightNVM command set if indicated in vs[1].
         */
        NVME_QUIRK_LIGHTNVM = (1 << 6),
};

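/*
 * Note: quirks are normally attached to a controller either from the PCI id
 * table in pci.c (via driver_data) or from the core quirk table in core.c,
 * which is matched against vendor id, model and firmware revision strings.
 */
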
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
        struct nvme_command *cmd;
        union nvme_result result;
        u8 retries;
        u8 flags;
        u16 status;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV
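/*
 * REQ_DRV is the request flag the block layer reserves for driver-private
 * use, so core block code will not interpret it.
 */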

enum {
        NVME_REQ_CANCELLED = (1 << 0),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
        return blk_mq_rq_to_pdu(req);
}

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
        NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
        NVME_CTRL_RESETTING,
        NVME_CTRL_CONNECTING,
        NVME_CTRL_DELETING,
        NVME_CTRL_DEAD,
};

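/*
 * Transitions between these states are validated centrally by
 * nvme_change_ctrl_state() in core.c.
 */
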
struct nvme_ctrl {
        enum nvme_ctrl_state state;
        bool identified;
        spinlock_t lock;
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
        struct device *dev;
        int instance;
        struct blk_mq_tag_set *tagset;
        struct blk_mq_tag_set *admin_tagset;
        struct list_head namespaces;
        struct rw_semaphore namespaces_rwsem;
        struct device ctrl_device;
        struct device *device;	/* char device */
        struct cdev cdev;
        struct work_struct reset_work;
        struct work_struct delete_work;

        struct nvme_subsystem *subsys;
        struct list_head subsys_entry;

        struct opal_dev *opal_dev;

        char name[12];
        u16 cntlid;

        u32 ctrl_config;
        u16 mtfa;
        u32 queue_count;

        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
        u16 oncs;
        u16 oacs;
        u16 nssa;
        u16 nr_streams;
        atomic_t abort_limit;
        u8 vwc;
        u32 vs;
        u32 sgls;
        u16 kas;
        u8 npss;
        u8 apsta;
        u32 aen_result;
        unsigned int shutdown_timeout;
        unsigned int kato;
        bool subsystem;
        unsigned long quirks;
        struct nvme_id_power_state psd[32];
        struct nvme_effects_log *effects;
        struct work_struct scan_work;
        struct work_struct async_event_work;
        struct delayed_work ka_work;
        struct nvme_command ka_cmd;
        struct work_struct fw_act_work;

        /* Power saving configuration */
        u64 ps_max_latency_us;
        bool apst_enabled;

        /* PCIe only: */
        u32 hmpre;
        u32 hmmin;
        u32 hmminds;
        u16 hmmaxd;

        /* Fabrics only */
        u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
        u16 maxcmd;
        int nr_reconnects;
        struct nvmf_ctrl_options *opts;
};

struct nvme_subsystem {
        int instance;
        struct device dev;
        /*
         * Because we unregister the device on the last put we need
         * a separate refcount.
         */
        struct kref ref;
        struct list_head entry;
        struct mutex lock;
        struct list_head ctrls;
        struct list_head nsheads;
        char subnqn[NVMF_NQN_SIZE];
        char serial[20];
        char model[40];
        char firmware_rev[8];
        u8 cmic;
        u16 vendor_id;
        struct ida ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
        u8 eui64[8];
        u8 nguid[16];
        uuid_t uuid;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
        struct gendisk *disk;
        struct nvme_ns __rcu *current_path;
        struct bio_list requeue_list;
        spinlock_t requeue_lock;
        struct work_struct requeue_work;
#endif
        struct list_head list;
        struct srcu_struct srcu;
        struct nvme_subsystem *subsys;
        unsigned ns_id;
        struct nvme_ns_ids ids;
        struct list_head entry;
        struct kref ref;
        int instance;
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject {
        struct fault_attr attr;
        struct dentry *parent;
        bool dont_retry;        /* DNR, do not retry */
        u16 status;             /* status code */
};
#endif

struct nvme_ns {
        struct list_head list;

        struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
        struct list_head siblings;
        struct nvm_dev *ndev;
        struct kref kref;
        struct nvme_ns_head *head;

        int lba_shift;
        u16 ms;
        u16 sgs;
        u32 sws;
        bool ext;
        u8 pi_type;
        unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
        u16 noiob;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
        struct nvme_fault_inject fault_inject;
#endif

};

struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
        unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
        int (*reinit_request)(void *data, struct request *rq);
        void (*stop_ctrl)(struct nvme_ctrl *ctrl);
};

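/*
 * Each transport supplies its own nvme_ctrl_ops (e.g. pci.c, rdma.c, fc.c
 * and the target loop driver) and passes it to nvme_init_ctrl().
 */
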
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_ns *ns);
void nvme_fault_inject_fini(struct nvme_ns *ns);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
        u32 val = 0;

        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
                return false;
        return val & NVME_CSTS_RDY;
}

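/*
 * 0x4E564D65 below is ASCII "NVMe"; the NVMe specification defines this as
 * the value written to the NSSR register to request an NVM subsystem reset.
 * ->subsystem is set from the controller's CAP.NSSRS capability bit.
 */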
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
        if (!ctrl->subsystem)
                return -ENOTTY;
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

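/*
 * Convert a 512-byte linux sector number into a namespace logical block
 * number: ->lba_shift is log2 of the LBA size and 9 is log2(512).
 */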
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
        return (sector >> (ns->lba_shift - 9));
}

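/*
 * RQF_SPECIAL_PAYLOAD marks requests for which the driver allocated its own
 * payload (e.g. the Dataset Management range list built for a discard);
 * that allocation is freed here once the command is finished.
 */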
static inline void nvme_cleanup_cmd(struct request *req)
{
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                                req->special_vec.bv_offset);
        }
}

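/*
 * Bit 0 of the completion queue entry's status field is the phase tag, so
 * the raw status is shifted right by one to leave only the status code.
 */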
static inline void nvme_end_request(struct request *req, __le16 status,
                union nvme_result result)
{
        struct nvme_request *rq = nvme_req(req);

        rq->status = le16_to_cpu(status) >> 1;
        rq->result = result;
        /* inject error when permitted by fault injection framework */
        nvme_should_fail(req);
        blk_mq_complete_request(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
        get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
        put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
                bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);

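/*
 * Passing NVME_QID_ANY as the qid to nvme_alloc_request() lets blk-mq pick
 * any queue instead of forcing the command onto a specific hardware queue.
 */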
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
                blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_add_disk_links(struct nvme_ns *ns);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk_links(struct nvme_ns *ns);

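/*
 * ->current_path caches the path most recently chosen by nvme_find_path();
 * clearing it forces a new path to be selected on the next submission.
 */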
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
        struct nvme_ns_head *head = ns->head;

        if (head && ns == srcu_dereference(head->current_path, &head->srcu))
                rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

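/*
 * Kick the requeue work when the last path on this head is going away so
 * that bios parked on ->requeue_list are not left stranded.
 */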
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
        struct nvme_ns_head *head = ns->head;

        if (head->disk && list_empty(&head->list))
                kblockd_schedule_work(&head->requeue_work);
}

#else
static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req,
                blk_status_t error)
{
        return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
                struct nvme_ns_head *head)
{
        return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
                int node)
{
        return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
                unsigned long arg)
{
        return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
        return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */