/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation.
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues.
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues.
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature.
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),
};

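/*
 * Illustrative sketch (not part of the original header): quirks are usually
 * attached per device, e.g. by the PCIe driver through the driver_data field
 * of its pci_device_id table, and then tested by the core via ctrl->quirks.
 * The lines below only show the pattern, they are not quoted from pci.c:
 *
 *	{ PCI_DEVICE(0x1c58, 0x0003),
 *		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *	...
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 */
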
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->rq_disk)
		return 0;
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}

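/*
 * Note (added for clarity): hardware queue index 0 corresponds to NVMe I/O
 * queue ID 1, hence the "+ 1" above; requests without a disk (e.g. admin
 * commands) report queue ID 0, the admin queue.
 */
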
/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_cel {
	struct list_head	entry;
	struct nvme_effects_log	log;
	u8			csi;
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct list_head cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;
	bool created;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* 0x4E564D65 is ASCII "NVMe", the reset value defined for NSSR */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

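/*
 * Worked example (added for clarity, assuming a namespace formatted with
 * 4096-byte logical blocks, i.e. lba_shift == 12 and SECTOR_SHIFT == 9):
 *
 *	nvme_sect_to_lba(ns, 8) == 8 >> 3 == 1
 *	nvme_lba_to_sect(ns, 1) == 1 << 3 == 8
 */
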
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}

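/*
 * Worked example (added for clarity): a 4096-byte buffer becomes
 * (4096 >> 2) - 1 == 1023 in NVMe's zero-based dword count, the encoding
 * used by fields such as the Get Log Page NUMD.
 */
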
static inline bool nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	/* the low bit of the completion status word is the phase tag */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}

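/*
 * Note (added for clarity): completions on the admin queue (qid 0) whose
 * command ID is at or above NVME_AQ_BLK_MQ_DEPTH do not belong to a blk-mq
 * request; they are Asynchronous Event Notifications issued by the core.
 */
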
void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
bool nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

static inline void nvme_trace_bio_complete(struct request *req,
	blk_status_t status)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline bool nvme_failover_req(struct request *req)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline void nvme_trace_bio_complete(struct request *req,
	blk_status_t status)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
		unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
void nvme_hwmon_init(struct nvme_ctrl *ctrl);
#else
static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
#endif

#endif /* _NVME_H */