/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),
};
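
/*
 * Editor's note (illustrative, not part of the original header): quirks are
 * normally attached when a controller is probed.  The PCIe driver, for
 * example, carries them in the driver_data of its pci_device_id table,
 * roughly:
 *
 *	{ PCI_VDEVICE(INTEL, 0x0953),
 *		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 *			       NVME_QUIRK_DEALLOCATE_ZEROES, },
 *
 * and the resulting mask is passed to nvme_init_ctrl(), which stores it in
 * nvme_ctrl.quirks.
 */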

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
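
/*
 * Illustrative sketch (assumed transport-side layout, not defined here): a
 * transport's per-request PDU must begin with struct nvme_request so that
 * nvme_req() can recover it from blk_mq_rq_to_pdu(), e.g. roughly:
 *
 *	struct nvme_rdma_request {
 *		struct nvme_request	req;	// must stay the first member
 *		...
 *	};
 *
 * with the PDU size advertised through blk_mq_tag_set.cmd_size.
 */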

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,	/* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};
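
/*
 * Editor's note: transports move a controller between these states with
 * nvme_change_ctrl_state() (declared below).  Roughly: a reset goes
 * LIVE -> RESETTING and back to LIVE (or ADMIN_ONLY when only the admin
 * queue could be brought up), fabrics transports sit in RECONNECTING while
 * re-establishing the association, and teardown passes through DELETING,
 * ending in DEAD once the controller is unreachable.
 */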

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u32 aen_result;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct work_struct fw_act_work;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;
};
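
/*
 * Illustrative sketch (assumed transport-side code, not part of this
 * header): drivers embed struct nvme_ctrl in their own controller structure
 * and recover it with container_of(), e.g. roughly what nvme-pci does:
 *
 *	struct nvme_dev {
 *		struct blk_mq_tag_set	tagset;
 *		...
 *		struct nvme_ctrl	ctrl;
 *	};
 *
 *	static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 *	{
 *		return container_of(ctrl, struct nvme_dev, ctrl);
 *	}
 */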

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	struct ida		ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct nvme_ns __rcu	*current_path;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
#endif
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	int			instance;
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	bool ext;
	u8 pi_type;
	unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
	u16 noiob;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	int (*reinit_request)(void *data, struct request *rq);
};
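
/*
 * Illustrative sketch (handler names are assumptions, not declared here):
 * each transport registers one nvme_ctrl_ops instance describing how to
 * reach its controller, e.g. roughly for PCIe:
 *
 *	static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 *		.name			= "pcie",
 *		.module			= THIS_MODULE,
 *		.flags			= NVME_F_METADATA_SUPPORTED,
 *		.reg_read32		= nvme_pci_reg_read32,
 *		.reg_write32		= nvme_pci_reg_write32,
 *		.reg_read64		= nvme_pci_reg_read64,
 *		.free_ctrl		= nvme_pci_free_ctrl,
 *		.submit_async_event	= nvme_pci_submit_async_event,
 *	};
 *
 * Fabrics transports additionally set NVME_F_FABRICS in ->flags.
 */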

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

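/*
 * Editor's note: the magic value written below is the ASCII string "NVMe"
 * (0x4E 0x56 0x4D 0x65), which the NVMe specification requires to be
 * written to the NSSR register to trigger a subsystem reset.
 */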
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
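
/*
 * Editor's note: nvme_block_nr() converts a 512-byte based sector number to
 * the namespace's native LBA.  For example, with 4096-byte logical blocks
 * (lba_shift == 12) the shift is 3, so sector 80 maps to LBA 10; with
 * 512-byte blocks (lba_shift == 9) the sector number is used unchanged.
 */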

static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}

static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	blk_mq_complete_request(req);
}
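
/*
 * Illustrative sketch (assumed transport-side code): completion handlers
 * call nvme_end_request() with status and result taken straight from the
 * completion queue entry, e.g. roughly:
 *
 *	struct request *rq = blk_mq_tag_to_rq(tags, cqe->command_id);
 *
 *	nvme_end_request(rq, cqe->status, cqe->result);
 *
 * blk_mq_complete_request() then typically ends up in nvme_complete_rq()
 * (declared below) via the driver's ->complete callback.
 */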

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
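
/*
 * Illustrative sketch of the synchronous command helpers above: build a
 * struct nvme_command and submit it on the admin queue, roughly what the
 * core does for Identify Controller ("id" is an assumed, suitably sized
 * buffer):
 *
 *	struct nvme_command c = { };
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */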

extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_add_disk_links(struct nvme_ns *ns);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk_links(struct nvme_ns *ns);

static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head && ns == srcu_dereference(head->current_path, &head->srcu))
		rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

#else
static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req,
					   blk_status_t error)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */