/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#define SPDK_NVMF_MAX_SGL_ENTRIES	16

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
};

enum spdk_nvmf_qpair_state {
	SPDK_NVMF_QPAIR_UNINITIALIZED = 0,
	SPDK_NVMF_QPAIR_INACTIVE,
	SPDK_NVMF_QPAIR_ACTIVATING,
	SPDK_NVMF_QPAIR_ACTIVE,
	SPDK_NVMF_QPAIR_DEACTIVATING,
	SPDK_NVMF_QPAIR_ERROR,
};

typedef void (*spdk_nvmf_state_change_done)(void *cb_arg, int status);

struct spdk_nvmf_tgt {
	struct spdk_nvmf_tgt_opts opts;

	uint64_t discovery_genctr;

	/* Array of subsystem pointers of size max_subsystems indexed by sid */
	struct spdk_nvmf_subsystem **subsystems;

	struct spdk_nvmf_discovery_log_page *discovery_log_page;
	size_t discovery_log_page_size;
	TAILQ_HEAD(, spdk_nvmf_transport) transports;

	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;
};
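
/*
 * Illustrative sketch only, not an SPDK function: subsystems are looked up by
 * their subsystem id (sid), which is a direct index into tgt->subsystems. This
 * assumes the array size is carried in tgt->opts.max_subsystems, as the comment
 * above suggests; unused slots hold NULL.
 */
static inline struct spdk_nvmf_subsystem *
nvmf_tgt_find_subsystem_by_sid_sketch(struct spdk_nvmf_tgt *tgt, uint32_t sid)
{
	if (sid >= tgt->opts.max_subsystems) {
		return NULL;
	}

	return tgt->subsystems[sid];
}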

struct spdk_nvmf_host {
	char *nqn;
	TAILQ_ENTRY(spdk_nvmf_host) link;
};

struct spdk_nvmf_listener {
	struct spdk_nvme_transport_id trid;
	struct spdk_nvmf_transport *transport;
	TAILQ_ENTRY(spdk_nvmf_listener) link;
};

struct spdk_nvmf_transport_poll_group {
	struct spdk_nvmf_transport *transport;
	TAILQ_ENTRY(spdk_nvmf_transport_poll_group) link;
};

struct spdk_nvmf_subsystem_poll_group {
	/* Array of channels for each namespace indexed by nsid - 1 */
	struct spdk_io_channel **channels;
	uint32_t num_channels;

	enum spdk_nvmf_subsystem_state state;

	TAILQ_HEAD(, spdk_nvmf_request) queued;
};
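
/*
 * Illustrative sketch only, not an SPDK function: the per-namespace I/O
 * channels above are indexed by nsid - 1, mirroring the namespace array in
 * struct spdk_nvmf_subsystem further down, so nsid 0 and anything beyond
 * num_channels must be rejected before indexing.
 */
static inline struct spdk_io_channel *
nvmf_sgroup_get_ns_channel_sketch(struct spdk_nvmf_subsystem_poll_group *sgroup, uint32_t nsid)
{
	if (nsid == 0 || nsid > sgroup->num_channels) {
		return NULL;
	}

	return sgroup->channels[nsid - 1];
}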

struct spdk_nvmf_poll_group {
	struct spdk_thread *thread;
	struct spdk_poller *poller;

	TAILQ_HEAD(, spdk_nvmf_transport_poll_group) tgroups;

	/* Array of per-subsystem poll group state, indexed by subsystem id (sid) */
	struct spdk_nvmf_subsystem_poll_group *sgroups;
	uint32_t num_sgroups;

	/* All of the queue pairs that belong to this poll group */
	TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
};
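
/*
 * Illustrative sketch only, not an SPDK function: a poll group keeps one
 * spdk_nvmf_subsystem_poll_group per subsystem, so resolving a subsystem's
 * per-group state is a bounds-checked index by sid.
 */
static inline struct spdk_nvmf_subsystem_poll_group *
nvmf_poll_group_get_sgroup_sketch(struct spdk_nvmf_poll_group *group, uint32_t sid)
{
	if (sid >= group->num_sgroups) {
		return NULL;
	}

	return &group->sgroups[sid];
}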

typedef enum _spdk_nvmf_request_exec_status {
	SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE,
	SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS,
} spdk_nvmf_request_exec_status;

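/*
 * Host-to-controller message: every in-capsule command, whether an NVMe
 * command or an NVMe-oF fabrics command, occupies one 64-byte submission
 * queue entry, which is what the SPDK_STATIC_ASSERT below checks.
 */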
union nvmf_h2c_msg {
	struct spdk_nvmf_capsule_cmd nvmf_cmd;
	struct spdk_nvme_cmd nvme_cmd;
	struct spdk_nvmf_fabric_prop_set_cmd prop_set_cmd;
	struct spdk_nvmf_fabric_prop_get_cmd prop_get_cmd;
	struct spdk_nvmf_fabric_connect_cmd connect_cmd;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_h2c_msg) == 64, "Incorrect size");

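/*
 * Controller-to-host message: responses share the layout of a 16-byte NVMe
 * completion queue entry, matching the SPDK_STATIC_ASSERT below.
 */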
union nvmf_c2h_msg {
	struct spdk_nvme_cpl nvme_cpl;
	struct spdk_nvmf_fabric_prop_get_rsp prop_get_rsp;
	struct spdk_nvmf_fabric_connect_rsp connect_rsp;
};
SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");

struct spdk_nvmf_request {
	struct spdk_nvmf_qpair *qpair;
	uint32_t length;
	enum spdk_nvme_data_transfer xfer;
	void *data;
	union nvmf_h2c_msg *cmd;
	union nvmf_c2h_msg *rsp;
	struct iovec iov[SPDK_NVMF_MAX_SGL_ENTRIES];
	uint32_t iovcnt;
	struct spdk_bdev_io_wait_entry bdev_io_wait;

	TAILQ_ENTRY(spdk_nvmf_request) link;
};

struct spdk_nvmf_ns {
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
};

struct spdk_nvmf_qpair {
	enum spdk_nvmf_qpair_state state;
	spdk_nvmf_state_change_done state_cb;
	void *state_cb_arg;

	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_poll_group *group;

	uint16_t qid;
	uint16_t sq_head;
	uint16_t sq_head_max;

	TAILQ_HEAD(, spdk_nvmf_request) outstanding;
	TAILQ_ENTRY(spdk_nvmf_qpair) link;
};

struct spdk_nvmf_ctrlr_feat {
	union spdk_nvme_feat_arbitration arbitration;
	union spdk_nvme_feat_power_management power_management;
	union spdk_nvme_feat_error_recovery error_recovery;
	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
	union spdk_nvme_feat_number_of_queues number_of_queues;
	union spdk_nvme_feat_write_atomicity write_atomicity;
	union spdk_nvme_feat_async_event_configuration async_event_configuration;
	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t cntlid;
	struct spdk_nvmf_subsystem *subsys;

	struct {
		union spdk_nvme_cap_register cap;
		union spdk_nvme_vs_register vs;
		union spdk_nvme_cc_register cc;
		union spdk_nvme_csts_register csts;
	} vcprop; /* virtual controller properties */

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_thread *thread;
	struct spdk_bit_array *qpair_mask;

	struct spdk_nvmf_request *aer_req;
	union spdk_nvme_async_event_completion notice_event;
	uint8_t hostid[16];

	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;

	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread *thread;
	uint32_t id;
	enum spdk_nvmf_subsystem_state state;

	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	enum spdk_nvmf_subtype subtype;
	uint16_t next_cntlid;
	bool allow_any_host;

	struct spdk_nvmf_tgt *tgt;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns **ns;
	uint32_t max_nsid;
	/* The maximum NSID allowed for this subsystem */
	uint32_t max_allowed_nsid;

	TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;

	TAILQ_HEAD(, spdk_nvmf_host) hosts;

	TAILQ_HEAD(, spdk_nvmf_listener) listeners;

	TAILQ_ENTRY(spdk_nvmf_subsystem) entries;
};

typedef void (*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_transport *spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt,
		enum spdk_nvme_transport_type);

int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_transport *transport);
int spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem);
int spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem,
		spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_free(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);

void spdk_nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt,
		void *buffer, uint64_t offset,
		uint32_t length);

void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);

void spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata);

int spdk_nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		struct spdk_nvmf_ctrlr *ctrlr);
struct spdk_nvmf_ctrlr *spdk_nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
		uint16_t cntlid);
int spdk_nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Aborting the AER is done on a per-controller basis and sends a completion for the AER to
 * the host. This function should be called when attempting to recover in error paths, when
 * it is OK for the host to send a subsequent AER.
 */
void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);

/*
 * Free the AER without informing the host; only the transport resources for the AER are
 * released. This function should be called when deleting a qpair, when one wants to make
 * sure the qpair is completely empty before freeing the request. The reason we free the
 * AER without sending a completion is to prevent the host from sending another AER.
 */
void spdk_nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

static inline struct spdk_nvmf_ns *
_spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}
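
/*
 * Illustrative usage sketch, not an SPDK function: because the wrap-around
 * check above maps nsid 0 and every out-of-range nsid to NULL, and unused
 * slots in subsystem->ns hold NULL, callers only need a single NULL test to
 * reject an invalid or inactive namespace ID.
 */
static inline bool
_spdk_nvmf_subsystem_ns_is_valid_sketch(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	return _spdk_nvmf_subsystem_get_ns(subsystem, nsid) != NULL;
}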

static inline bool
spdk_nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

#endif /* __NVMF_INTERNAL_H__ */